//===---- TargetInfo.cpp - Encapsulate target details -----------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// These classes wrap the information about a call or function
// definition used to handle ABI compliancy.
//
//===----------------------------------------------------------------------===//

#include "TargetInfo.h"
#include "ABIInfo.h"
#include "CGBlocks.h"
#include "CGCXXABI.h"
#include "CGValue.h"
#include "CodeGenFunction.h"
#include "clang/AST/Attr.h"
#include "clang/AST/RecordLayout.h"
#include "clang/Basic/CodeGenOptions.h"
#include "clang/CodeGen/CGFunctionInfo.h"
#include "clang/CodeGen/SwiftCallingConv.h"
#include "llvm/ADT/SmallBitVector.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/ADT/StringSwitch.h"
#include "llvm/ADT/Triple.h"
#include "llvm/ADT/Twine.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Type.h"
#include "llvm/Support/raw_ostream.h"
#include <algorithm> // std::sort

using namespace clang;
using namespace CodeGen;

// Helper for coercing an aggregate argument or return value into an integer
// array of the same size (including padding) and alignment. This alternate
// coercion happens only for the RenderScript ABI and can be removed after
// runtimes that rely on it are no longer supported.
//
// RenderScript assumes that the size of the argument / return value in the IR
// is the same as the size of the corresponding qualified type. This helper
// coerces the aggregate type into an array of the same size (including
// padding). This coercion is used in lieu of expansion of struct members or
// other canonical coercions that return a coerced-type of larger size.
//
// Ty          - The argument / return value type
// Context     - The associated ASTContext
// LLVMContext - The associated LLVMContext
static ABIArgInfo coerceToIntArray(QualType Ty,
                                   ASTContext &Context,
                                   llvm::LLVMContext &LLVMContext) {
  // Alignment and Size are measured in bits.
  const uint64_t Size = Context.getTypeSize(Ty);
  const uint64_t Alignment = Context.getTypeAlign(Ty);
  llvm::Type *IntType = llvm::Type::getIntNTy(LLVMContext, Alignment);
  const uint64_t NumElements = (Size + Alignment - 1) / Alignment;
  return ABIArgInfo::getDirect(llvm::ArrayType::get(IntType, NumElements));
}

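// Illustrative example for coerceToIntArray: a 12-byte struct with 32-bit
// alignment is coerced to [3 x i32], and a 24-byte struct with 64-bit
// alignment to [3 x i64]; the array always covers the full size of the
// original type, padding included.
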
static void AssignToArrayRange(CodeGen::CGBuilderTy &Builder,
                               llvm::Value *Array,
                               llvm::Value *Value,
                               unsigned FirstIndex,
                               unsigned LastIndex) {
  // Alternatively, we could emit this as a loop in the source.
  for (unsigned I = FirstIndex; I <= LastIndex; ++I) {
    llvm::Value *Cell =
        Builder.CreateConstInBoundsGEP1_32(Builder.getInt8Ty(), Array, I);
    Builder.CreateAlignedStore(Value, Cell, CharUnits::One());
  }
}

static bool isAggregateTypeForABI(QualType T) {
  return !CodeGenFunction::hasScalarEvaluationKind(T) ||
         T->isMemberFunctionPointerType();
}

ABIArgInfo
ABIInfo::getNaturalAlignIndirect(QualType Ty, bool ByRef, bool Realign,
                                 llvm::Type *Padding) const {
  return ABIArgInfo::getIndirect(getContext().getTypeAlignInChars(Ty),
                                 ByRef, Realign, Padding);
}

ABIArgInfo
ABIInfo::getNaturalAlignIndirectInReg(QualType Ty, bool Realign) const {
  return ABIArgInfo::getIndirectInReg(getContext().getTypeAlignInChars(Ty),
                                      /*ByRef*/ false, Realign);
}

Address ABIInfo::EmitMSVAArg(CodeGenFunction &CGF, Address VAListAddr,
                             QualType Ty) const {
  return Address::invalid();
}

ABIInfo::~ABIInfo() {}

/// Does the given lowering require more than the given number of
/// registers when expanded?
///
/// This is intended to be the basis of a reasonable basic implementation
/// of should{Pass,Return}IndirectlyForSwift.
///
/// For most targets, a limit of four total registers is reasonable; this
/// limits the amount of code required in order to move around the value
/// in case it wasn't produced immediately prior to the call by the caller
/// (or wasn't produced in exactly the right registers) or isn't used
/// immediately within the callee.  But some targets may need to further
/// limit the register count due to an inability to support that many
/// return registers.
static bool occupiesMoreThan(CodeGenTypes &cgt,
                             ArrayRef<llvm::Type*> scalarTypes,
                             unsigned maxAllRegisters) {
  unsigned intCount = 0, fpCount = 0;
  for (llvm::Type *type : scalarTypes) {
    if (type->isPointerTy()) {
      intCount++;
    } else if (auto intTy = dyn_cast<llvm::IntegerType>(type)) {
      auto ptrWidth = cgt.getTarget().getPointerWidth(0);
      intCount += (intTy->getBitWidth() + ptrWidth - 1) / ptrWidth;
    } else {
      assert(type->isVectorTy() || type->isFloatingPointTy());
      fpCount++;
    }
  }

  return (intCount + fpCount > maxAllRegisters);
}

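// Illustrative example for occupiesMoreThan: on a target with 64-bit
// pointers, { i64, i64, float } counts as two integer registers plus one FP
// register and fits a limit of four, while { i64, i64, i64, i64, float }
// counts as five and does not.
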
bool SwiftABIInfo::isLegalVectorTypeForSwift(CharUnits vectorSize,
                                             llvm::Type *eltTy,
                                             unsigned numElts) const {
  // The default implementation of this assumes that the target guarantees
  // 128-bit SIMD support but nothing more.
  return (vectorSize.getQuantity() > 8 && vectorSize.getQuantity() <= 16);
}

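// Under the default rule above, a 16-byte vector such as <4 x float> is
// legal, while an 8-byte <2 x float> or a 32-byte <8 x float> is not;
// targets with wider SIMD guarantees can override this.
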
static CGCXXABI::RecordArgABI getRecordArgABI(const RecordType *RT,
                                              CGCXXABI &CXXABI) {
  const CXXRecordDecl *RD = dyn_cast<CXXRecordDecl>(RT->getDecl());
  if (!RD) {
    if (!RT->getDecl()->canPassInRegisters())
      return CGCXXABI::RAA_Indirect;
    return CGCXXABI::RAA_Default;
  }
  return CXXABI.getRecordArgABI(RD);
}

static CGCXXABI::RecordArgABI getRecordArgABI(QualType T,
                                              CGCXXABI &CXXABI) {
  const RecordType *RT = T->getAs<RecordType>();
  if (!RT)
    return CGCXXABI::RAA_Default;
  return getRecordArgABI(RT, CXXABI);
}

static bool classifyReturnType(const CGCXXABI &CXXABI, CGFunctionInfo &FI,
                               const ABIInfo &Info) {
  QualType Ty = FI.getReturnType();

  if (const auto *RT = Ty->getAs<RecordType>())
    if (!isa<CXXRecordDecl>(RT->getDecl()) &&
        !RT->getDecl()->canPassInRegisters()) {
      FI.getReturnInfo() = Info.getNaturalAlignIndirect(Ty);
      return true;
    }

  return CXXABI.classifyReturnType(FI);
}

/// Pass transparent unions as if they were the type of the first element. Sema
/// should ensure that all elements of the union have the same "machine type".
static QualType useFirstFieldIfTransparentUnion(QualType Ty) {
  if (const RecordType *UT = Ty->getAsUnionType()) {
    const RecordDecl *UD = UT->getDecl();
    if (UD->hasAttr<TransparentUnionAttr>()) {
      assert(!UD->field_empty() && "sema created an empty transparent union");
      return UD->field_begin()->getType();
    }
  }
  return Ty;
}

CGCXXABI &ABIInfo::getCXXABI() const {
  return CGT.getCXXABI();
}

ASTContext &ABIInfo::getContext() const {
  return CGT.getContext();
}

llvm::LLVMContext &ABIInfo::getVMContext() const {
  return CGT.getLLVMContext();
}

const llvm::DataLayout &ABIInfo::getDataLayout() const {
  return CGT.getDataLayout();
}

const TargetInfo &ABIInfo::getTarget() const {
  return CGT.getTarget();
}

const CodeGenOptions &ABIInfo::getCodeGenOpts() const {
  return CGT.getCodeGenOpts();
}

bool ABIInfo::isAndroid() const { return getTarget().getTriple().isAndroid(); }

bool ABIInfo::isHomogeneousAggregateBaseType(QualType Ty) const {
  return false;
}

bool ABIInfo::isHomogeneousAggregateSmallEnough(const Type *Base,
                                                uint64_t Members) const {
  return false;
}

LLVM_DUMP_METHOD void ABIArgInfo::dump() const {
  raw_ostream &OS = llvm::errs();
  OS << "(ABIArgInfo Kind=";
  switch (TheKind) {
  case Direct:
    OS << "Direct Type=";
    if (llvm::Type *Ty = getCoerceToType())
      Ty->print(OS);
    else
      OS << "null";
    break;
  case Extend:
    OS << "Extend";
    break;
  case Ignore:
    OS << "Ignore";
    break;
  case InAlloca:
    OS << "InAlloca Offset=" << getInAllocaFieldIndex();
    break;
  case Indirect:
    OS << "Indirect Align=" << getIndirectAlign().getQuantity()
       << " ByVal=" << getIndirectByVal()
       << " Realign=" << getIndirectRealign();
    break;
  case Expand:
    OS << "Expand";
    break;
  case CoerceAndExpand:
    OS << "CoerceAndExpand Type=";
    getCoerceAndExpandType()->print(OS);
    break;
  }
  OS << ")\n";
}

// Dynamically round a pointer up to a multiple of the given alignment.
static llvm::Value *emitRoundPointerUpToAlignment(CodeGenFunction &CGF,
                                                  llvm::Value *Ptr,
                                                  CharUnits Align) {
  llvm::Value *PtrAsInt = Ptr;
  // OverflowArgArea = (OverflowArgArea + Align - 1) & -Align;
  PtrAsInt = CGF.Builder.CreatePtrToInt(PtrAsInt, CGF.IntPtrTy);
  PtrAsInt = CGF.Builder.CreateAdd(PtrAsInt,
        llvm::ConstantInt::get(CGF.IntPtrTy, Align.getQuantity() - 1));
  PtrAsInt = CGF.Builder.CreateAnd(PtrAsInt,
        llvm::ConstantInt::get(CGF.IntPtrTy, -Align.getQuantity()));
  PtrAsInt = CGF.Builder.CreateIntToPtr(PtrAsInt,
                                        Ptr->getType(),
                                        Ptr->getName() + ".aligned");
  return PtrAsInt;
}

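// For example, with Align == 8 a pointer value of 0x1001 becomes
// (0x1001 + 7) & -8 == 0x1008, while an already 8-byte-aligned value is
// left unchanged.
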
/// Emit va_arg for a platform using the common void* representation,
/// where arguments are simply emitted in an array of slots on the stack.
///
/// This version implements the core direct-value passing rules.
///
/// \param SlotSize - The size and alignment of a stack slot.
///   Each argument will be allocated to a multiple of this number of
///   slots, and all the slots will be aligned to this value.
/// \param AllowHigherAlign - The slot alignment is not a cap;
///   an argument type with an alignment greater than the slot size
///   will be emitted on a higher-alignment address, potentially
///   leaving one or more empty slots behind as padding.  If this
///   is false, the returned address might be less-aligned than
///   DirectAlign.
static Address emitVoidPtrDirectVAArg(CodeGenFunction &CGF,
                                      Address VAListAddr,
                                      llvm::Type *DirectTy,
                                      CharUnits DirectSize,
                                      CharUnits DirectAlign,
                                      CharUnits SlotSize,
                                      bool AllowHigherAlign) {
  // Cast the element type to i8* if necessary.  Some platforms define
  // va_list as a struct containing an i8* instead of just an i8*.
  if (VAListAddr.getElementType() != CGF.Int8PtrTy)
    VAListAddr = CGF.Builder.CreateElementBitCast(VAListAddr, CGF.Int8PtrTy);

  llvm::Value *Ptr = CGF.Builder.CreateLoad(VAListAddr, "argp.cur");

  // If the CC aligns values higher than the slot size, do so if needed.
  Address Addr = Address::invalid();
  if (AllowHigherAlign && DirectAlign > SlotSize) {
    Addr = Address(emitRoundPointerUpToAlignment(CGF, Ptr, DirectAlign),
                   DirectAlign);
  } else {
    Addr = Address(Ptr, SlotSize);
  }

  // Advance the pointer past the argument, then store that back.
  CharUnits FullDirectSize = DirectSize.alignTo(SlotSize);
  Address NextPtr =
      CGF.Builder.CreateConstInBoundsByteGEP(Addr, FullDirectSize, "argp.next");
  CGF.Builder.CreateStore(NextPtr.getPointer(), VAListAddr);

  // If the argument is smaller than a slot, and this is a big-endian
  // target, the argument will be right-adjusted in its slot.
  if (DirectSize < SlotSize && CGF.CGM.getDataLayout().isBigEndian() &&
      !DirectTy->isStructTy()) {
    Addr = CGF.Builder.CreateConstInBoundsByteGEP(Addr, SlotSize - DirectSize);
  }

  Addr = CGF.Builder.CreateElementBitCast(Addr, DirectTy);
  return Addr;
}

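// Note the big-endian adjustment above: e.g. a 1-byte argument in an 8-byte
// slot lives at slot offset 7 on a big-endian target, so the address is
// advanced by SlotSize - DirectSize before the final bitcast.
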
/// Emit va_arg for a platform using the common void* representation,
/// where arguments are simply emitted in an array of slots on the stack.
///
/// \param IsIndirect - Values of this type are passed indirectly.
/// \param ValueInfo - The size and alignment of this type, generally
///   computed with getContext().getTypeInfoInChars(ValueTy).
/// \param SlotSizeAndAlign - The size and alignment of a stack slot.
///   Each argument will be allocated to a multiple of this number of
///   slots, and all the slots will be aligned to this value.
/// \param AllowHigherAlign - The slot alignment is not a cap;
///   an argument type with an alignment greater than the slot size
///   will be emitted on a higher-alignment address, potentially
///   leaving one or more empty slots behind as padding.
static Address emitVoidPtrVAArg(CodeGenFunction &CGF, Address VAListAddr,
                                QualType ValueTy, bool IsIndirect,
                                std::pair<CharUnits, CharUnits> ValueInfo,
                                CharUnits SlotSizeAndAlign,
                                bool AllowHigherAlign) {
  // The size and alignment of the value that was passed directly.
  CharUnits DirectSize, DirectAlign;
  if (IsIndirect) {
    DirectSize = CGF.getPointerSize();
    DirectAlign = CGF.getPointerAlign();
  } else {
    DirectSize = ValueInfo.first;
    DirectAlign = ValueInfo.second;
  }

  // Cast the address we've calculated to the right type.
  llvm::Type *DirectTy = CGF.ConvertTypeForMem(ValueTy);
  if (IsIndirect)
    DirectTy = DirectTy->getPointerTo(0);

  Address Addr = emitVoidPtrDirectVAArg(CGF, VAListAddr, DirectTy,
                                        DirectSize, DirectAlign,
                                        SlotSizeAndAlign,
                                        AllowHigherAlign);

  if (IsIndirect) {
    Addr = Address(CGF.Builder.CreateLoad(Addr), ValueInfo.second);
  }

  return Addr;
}

static Address emitMergePHI(CodeGenFunction &CGF,
                            Address Addr1, llvm::BasicBlock *Block1,
                            Address Addr2, llvm::BasicBlock *Block2,
                            const llvm::Twine &Name = "") {
  assert(Addr1.getType() == Addr2.getType());
  llvm::PHINode *PHI = CGF.Builder.CreatePHI(Addr1.getType(), 2, Name);
  PHI->addIncoming(Addr1.getPointer(), Block1);
  PHI->addIncoming(Addr2.getPointer(), Block2);
  CharUnits Align = std::min(Addr1.getAlignment(), Addr2.getAlignment());
  return Address(PHI, Align);
}

TargetCodeGenInfo::~TargetCodeGenInfo() { delete Info; }

// If someone can figure out a general rule for this, that would be great.
// It's probably just doomed to be platform-dependent, though.
unsigned TargetCodeGenInfo::getSizeOfUnwindException() const {
  // Verified for:
  //   x86-64     FreeBSD, Linux, Darwin
  //   x86-32     FreeBSD, Linux, Darwin
  //   PowerPC    Linux, Darwin
  //   ARM        Darwin (*not* EABI)
  //   AArch64    Linux
  return 32;
}

bool TargetCodeGenInfo::isNoProtoCallVariadic(const CallArgList &args,
                                     const FunctionNoProtoType *fnType) const {
  // The following conventions are known to require this to be false:
  //   x86_stdcall
  //   MIPS
  // For everything else, we just prefer false unless we opt out.
  return false;
}

void
TargetCodeGenInfo::getDependentLibraryOption(llvm::StringRef Lib,
                                             llvm::SmallString<24> &Opt) const {
  // This assumes the user is passing a library name like "rt" instead of a
  // filename like "librt.a/so", and that they don't care whether it's static or
  // dynamic.
  Opt = "-l";
  Opt += Lib;
}

unsigned TargetCodeGenInfo::getOpenCLKernelCallingConv() const {
  // OpenCL kernels are called via an explicit runtime API with arguments
  // set with clSetKernelArg(), not as normal sub-functions.
  // Return SPIR_KERNEL by default as the kernel calling convention to
  // ensure the fingerprint is fixed in such a way that each OpenCL argument
  // gets one matching argument in the produced kernel function argument
  // list, to enable a feasible implementation of clSetKernelArg() with
  // aggregates etc.  If we used the default C calling convention here,
  // clSetKernelArg() might break depending on the target-specific
  // conventions; different targets might split structs passed as values
  // into multiple function arguments etc.
  return llvm::CallingConv::SPIR_KERNEL;
}

llvm::Constant *TargetCodeGenInfo::getNullPointer(const CodeGen::CodeGenModule &CGM,
    llvm::PointerType *T, QualType QT) const {
  return llvm::ConstantPointerNull::get(T);
}

LangAS TargetCodeGenInfo::getGlobalVarAddressSpace(CodeGenModule &CGM,
                                                   const VarDecl *D) const {
  assert(!CGM.getLangOpts().OpenCL &&
         !(CGM.getLangOpts().CUDA && CGM.getLangOpts().CUDAIsDevice) &&
         "Address space agnostic languages only");
  return D ? D->getType().getAddressSpace() : LangAS::Default;
}

llvm::Value *TargetCodeGenInfo::performAddrSpaceCast(
    CodeGen::CodeGenFunction &CGF, llvm::Value *Src, LangAS SrcAddr,
    LangAS DestAddr, llvm::Type *DestTy, bool isNonNull) const {
  // Since target may map different address spaces in AST to the same address
  // space, an address space conversion may end up as a bitcast.
  if (auto *C = dyn_cast<llvm::Constant>(Src))
    return performAddrSpaceCast(CGF.CGM, C, SrcAddr, DestAddr, DestTy);
  // Try to preserve the source's name to make IR more readable.
  return CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
      Src, DestTy, Src->hasName() ? Src->getName() + ".ascast" : "");
}

llvm::Constant *
TargetCodeGenInfo::performAddrSpaceCast(CodeGenModule &CGM, llvm::Constant *Src,
                                        LangAS SrcAddr, LangAS DestAddr,
                                        llvm::Type *DestTy) const {
  // Since target may map different address spaces in AST to the same address
  // space, an address space conversion may end up as a bitcast.
  return llvm::ConstantExpr::getPointerCast(Src, DestTy);
}

llvm::SyncScope::ID
TargetCodeGenInfo::getLLVMSyncScopeID(const LangOptions &LangOpts,
                                      SyncScope Scope,
                                      llvm::AtomicOrdering Ordering,
                                      llvm::LLVMContext &Ctx) const {
  return Ctx.getOrInsertSyncScopeID(""); /* default sync scope */
}

static bool isEmptyRecord(ASTContext &Context, QualType T, bool AllowArrays);

/// isEmptyField - Return true iff the field is "empty", that is, it
/// is an unnamed bit-field or an (array of) empty record(s).
static bool isEmptyField(ASTContext &Context, const FieldDecl *FD,
                         bool AllowArrays) {
  if (FD->isUnnamedBitfield())
    return true;

  QualType FT = FD->getType();

  // Constant arrays of empty records count as empty, strip them off.
  // Constant arrays of zero length always count as empty.
  if (AllowArrays)
    while (const ConstantArrayType *AT = Context.getAsConstantArrayType(FT)) {
      if (AT->getSize() == 0)
        return true;
      FT = AT->getElementType();
    }

  const RecordType *RT = FT->getAs<RecordType>();
  if (!RT)
    return false;

  // C++ record fields are never empty, at least in the Itanium ABI.
  //
  // FIXME: We should use a predicate for whether this behavior is true in the
  // current ABI.
  if (isa<CXXRecordDecl>(RT->getDecl()))
    return false;

  return isEmptyRecord(Context, FT, AllowArrays);
}

/// isEmptyRecord - Return true iff a structure contains only empty
/// fields. Note that a structure with a flexible array member is not
/// considered empty.
static bool isEmptyRecord(ASTContext &Context, QualType T, bool AllowArrays) {
  const RecordType *RT = T->getAs<RecordType>();
  if (!RT)
    return false;
  const RecordDecl *RD = RT->getDecl();
  if (RD->hasFlexibleArrayMember())
    return false;

  // If this is a C++ record, check the bases first.
  if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD))
    for (const auto &I : CXXRD->bases())
      if (!isEmptyRecord(Context, I.getType(), true))
        return false;

  for (const auto *I : RD->fields())
    if (!isEmptyField(Context, I, AllowArrays))
      return false;
  return true;
}

/// isSingleElementStruct - Determine if a structure is a "single
/// element struct", i.e. it has exactly one non-empty field or
/// exactly one field which is itself a single element
/// struct. Structures with flexible array members are never
/// considered single element structs.
///
/// \return The type of the single non-empty field, if it exists.
static const Type *isSingleElementStruct(QualType T, ASTContext &Context) {
  const RecordType *RT = T->getAs<RecordType>();
  if (!RT)
    return nullptr;

  const RecordDecl *RD = RT->getDecl();
  if (RD->hasFlexibleArrayMember())
    return nullptr;

  const Type *Found = nullptr;

  // If this is a C++ record, check the bases first.
  if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD)) {
    for (const auto &I : CXXRD->bases()) {
      // Ignore empty records.
      if (isEmptyRecord(Context, I.getType(), true))
        continue;

      // If we already found an element then this isn't a single-element
      // struct.
      if (Found)
        return nullptr;

      // If this is non-empty and not a single element struct, the composite
      // cannot be a single element struct.
      Found = isSingleElementStruct(I.getType(), Context);
      if (!Found)
        return nullptr;
    }
  }

  // Check for single element.
  for (const auto *FD : RD->fields()) {
    QualType FT = FD->getType();

    // Ignore empty fields.
    if (isEmptyField(Context, FD, true))
      continue;

    // If we already found an element then this isn't a single-element
    // struct.
    if (Found)
      return nullptr;

    // Treat single element arrays as the element.
    while (const ConstantArrayType *AT = Context.getAsConstantArrayType(FT)) {
      if (AT->getSize().getZExtValue() != 1)
        break;
      FT = AT->getElementType();
    }

    if (!isAggregateTypeForABI(FT)) {
      Found = FT.getTypePtr();
    } else {
      Found = isSingleElementStruct(FT, Context);
      if (!Found)
        return nullptr;
    }
  }

  // We don't consider a struct a single-element struct if it has
  // padding beyond the element type.
  if (Found && Context.getTypeSize(Found) != Context.getTypeSize(T))
    return nullptr;

  return Found;
}

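// Illustrative examples for isSingleElementStruct: struct { float f; } and
// struct { struct { double d; } x; } qualify (yielding float and double,
// respectively); struct { int a; int b; } does not, nor does any struct
// whose size exceeds that of its only element due to tail padding.
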
608 namespace {
EmitVAArgInstr(CodeGenFunction & CGF,Address VAListAddr,QualType Ty,const ABIArgInfo & AI)609 Address EmitVAArgInstr(CodeGenFunction &CGF, Address VAListAddr, QualType Ty,
610 const ABIArgInfo &AI) {
611 // This default implementation defers to the llvm backend's va_arg
612 // instruction. It can handle only passing arguments directly
613 // (typically only handled in the backend for primitive types), or
614 // aggregates passed indirectly by pointer (NOTE: if the "byval"
615 // flag has ABI impact in the callee, this implementation cannot
616 // work.)
617
618 // Only a few cases are covered here at the moment -- those needed
619 // by the default abi.
620 llvm::Value *Val;
621
622 if (AI.isIndirect()) {
623 assert(!AI.getPaddingType() &&
624 "Unexpected PaddingType seen in arginfo in generic VAArg emitter!");
625 assert(
626 !AI.getIndirectRealign() &&
627 "Unexpected IndirectRealign seen in arginfo in generic VAArg emitter!");
628
629 auto TyInfo = CGF.getContext().getTypeInfoInChars(Ty);
630 CharUnits TyAlignForABI = TyInfo.second;
631
632 llvm::Type *BaseTy =
633 llvm::PointerType::getUnqual(CGF.ConvertTypeForMem(Ty));
634 llvm::Value *Addr =
635 CGF.Builder.CreateVAArg(VAListAddr.getPointer(), BaseTy);
636 return Address(Addr, TyAlignForABI);
637 } else {
638 assert((AI.isDirect() || AI.isExtend()) &&
639 "Unexpected ArgInfo Kind in generic VAArg emitter!");
640
641 assert(!AI.getInReg() &&
642 "Unexpected InReg seen in arginfo in generic VAArg emitter!");
643 assert(!AI.getPaddingType() &&
644 "Unexpected PaddingType seen in arginfo in generic VAArg emitter!");
645 assert(!AI.getDirectOffset() &&
646 "Unexpected DirectOffset seen in arginfo in generic VAArg emitter!");
647 assert(!AI.getCoerceToType() &&
648 "Unexpected CoerceToType seen in arginfo in generic VAArg emitter!");
649
650 Address Temp = CGF.CreateMemTemp(Ty, "varet");
651 Val = CGF.Builder.CreateVAArg(VAListAddr.getPointer(), CGF.ConvertType(Ty));
652 CGF.Builder.CreateStore(Val, Temp);
653 return Temp;
654 }
655 }
656
657 /// DefaultABIInfo - The default implementation for ABI specific
658 /// details. This implementation provides information which results in
659 /// self-consistent and sensible LLVM IR generation, but does not
660 /// conform to any particular ABI.
661 class DefaultABIInfo : public ABIInfo {
662 public:
DefaultABIInfo(CodeGen::CodeGenTypes & CGT)663 DefaultABIInfo(CodeGen::CodeGenTypes &CGT) : ABIInfo(CGT) {}
664
665 ABIArgInfo classifyReturnType(QualType RetTy) const;
666 ABIArgInfo classifyArgumentType(QualType RetTy) const;
667
computeInfo(CGFunctionInfo & FI) const668 void computeInfo(CGFunctionInfo &FI) const override {
669 if (!getCXXABI().classifyReturnType(FI))
670 FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
671 for (auto &I : FI.arguments())
672 I.info = classifyArgumentType(I.type);
673 }
674
EmitVAArg(CodeGenFunction & CGF,Address VAListAddr,QualType Ty) const675 Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
676 QualType Ty) const override {
677 return EmitVAArgInstr(CGF, VAListAddr, Ty, classifyArgumentType(Ty));
678 }
679 };
680
681 class DefaultTargetCodeGenInfo : public TargetCodeGenInfo {
682 public:
DefaultTargetCodeGenInfo(CodeGen::CodeGenTypes & CGT)683 DefaultTargetCodeGenInfo(CodeGen::CodeGenTypes &CGT)
684 : TargetCodeGenInfo(new DefaultABIInfo(CGT)) {}
685 };
686
classifyArgumentType(QualType Ty) const687 ABIArgInfo DefaultABIInfo::classifyArgumentType(QualType Ty) const {
688 Ty = useFirstFieldIfTransparentUnion(Ty);
689
690 if (isAggregateTypeForABI(Ty)) {
691 // Records with non-trivial destructors/copy-constructors should not be
692 // passed by value.
693 if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, getCXXABI()))
694 return getNaturalAlignIndirect(Ty, RAA == CGCXXABI::RAA_DirectInMemory);
695
696 return getNaturalAlignIndirect(Ty);
697 }
698
699 // Treat an enum type as its underlying type.
700 if (const EnumType *EnumTy = Ty->getAs<EnumType>())
701 Ty = EnumTy->getDecl()->getIntegerType();
702
703 return (Ty->isPromotableIntegerType() ? ABIArgInfo::getExtend(Ty)
704 : ABIArgInfo::getDirect());
705 }
706
classifyReturnType(QualType RetTy) const707 ABIArgInfo DefaultABIInfo::classifyReturnType(QualType RetTy) const {
708 if (RetTy->isVoidType())
709 return ABIArgInfo::getIgnore();
710
711 if (isAggregateTypeForABI(RetTy))
712 return getNaturalAlignIndirect(RetTy);
713
714 // Treat an enum type as its underlying type.
715 if (const EnumType *EnumTy = RetTy->getAs<EnumType>())
716 RetTy = EnumTy->getDecl()->getIntegerType();
717
718 return (RetTy->isPromotableIntegerType() ? ABIArgInfo::getExtend(RetTy)
719 : ABIArgInfo::getDirect());
720 }
721
722 //===----------------------------------------------------------------------===//
723 // WebAssembly ABI Implementation
724 //
725 // This is a very simple ABI that relies a lot on DefaultABIInfo.
726 //===----------------------------------------------------------------------===//
727
728 class WebAssemblyABIInfo final : public SwiftABIInfo {
729 DefaultABIInfo defaultInfo;
730
731 public:
WebAssemblyABIInfo(CodeGen::CodeGenTypes & CGT)732 explicit WebAssemblyABIInfo(CodeGen::CodeGenTypes &CGT)
733 : SwiftABIInfo(CGT), defaultInfo(CGT) {}
734
735 private:
736 ABIArgInfo classifyReturnType(QualType RetTy) const;
737 ABIArgInfo classifyArgumentType(QualType Ty) const;
738
739 // DefaultABIInfo's classifyReturnType and classifyArgumentType are
740 // non-virtual, but computeInfo and EmitVAArg are virtual, so we
741 // overload them.
computeInfo(CGFunctionInfo & FI) const742 void computeInfo(CGFunctionInfo &FI) const override {
743 if (!getCXXABI().classifyReturnType(FI))
744 FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
745 for (auto &Arg : FI.arguments())
746 Arg.info = classifyArgumentType(Arg.type);
747 }
748
749 Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
750 QualType Ty) const override;
751
shouldPassIndirectlyForSwift(ArrayRef<llvm::Type * > scalars,bool asReturnValue) const752 bool shouldPassIndirectlyForSwift(ArrayRef<llvm::Type*> scalars,
753 bool asReturnValue) const override {
754 return occupiesMoreThan(CGT, scalars, /*total*/ 4);
755 }
756
isSwiftErrorInRegister() const757 bool isSwiftErrorInRegister() const override {
758 return false;
759 }
760 };
761
762 class WebAssemblyTargetCodeGenInfo final : public TargetCodeGenInfo {
763 public:
WebAssemblyTargetCodeGenInfo(CodeGen::CodeGenTypes & CGT)764 explicit WebAssemblyTargetCodeGenInfo(CodeGen::CodeGenTypes &CGT)
765 : TargetCodeGenInfo(new WebAssemblyABIInfo(CGT)) {}
766
setTargetAttributes(const Decl * D,llvm::GlobalValue * GV,CodeGen::CodeGenModule & CGM) const767 void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
768 CodeGen::CodeGenModule &CGM) const override {
769 TargetCodeGenInfo::setTargetAttributes(D, GV, CGM);
770 if (const auto *FD = dyn_cast_or_null<FunctionDecl>(D)) {
771 if (const auto *Attr = FD->getAttr<WebAssemblyImportModuleAttr>()) {
772 llvm::Function *Fn = cast<llvm::Function>(GV);
773 llvm::AttrBuilder B;
774 B.addAttribute("wasm-import-module", Attr->getImportModule());
775 Fn->addAttributes(llvm::AttributeList::FunctionIndex, B);
776 }
777 if (const auto *Attr = FD->getAttr<WebAssemblyImportNameAttr>()) {
778 llvm::Function *Fn = cast<llvm::Function>(GV);
779 llvm::AttrBuilder B;
780 B.addAttribute("wasm-import-name", Attr->getImportName());
781 Fn->addAttributes(llvm::AttributeList::FunctionIndex, B);
782 }
783 if (const auto *Attr = FD->getAttr<WebAssemblyExportNameAttr>()) {
784 llvm::Function *Fn = cast<llvm::Function>(GV);
785 llvm::AttrBuilder B;
786 B.addAttribute("wasm-export-name", Attr->getExportName());
787 Fn->addAttributes(llvm::AttributeList::FunctionIndex, B);
788 }
789 }
790
791 if (auto *FD = dyn_cast_or_null<FunctionDecl>(D)) {
792 llvm::Function *Fn = cast<llvm::Function>(GV);
793 if (!FD->doesThisDeclarationHaveABody() && !FD->hasPrototype())
794 Fn->addFnAttr("no-prototype");
795 }
796 }
797 };
798
799 /// Classify argument of given type \p Ty.
classifyArgumentType(QualType Ty) const800 ABIArgInfo WebAssemblyABIInfo::classifyArgumentType(QualType Ty) const {
801 Ty = useFirstFieldIfTransparentUnion(Ty);
802
803 if (isAggregateTypeForABI(Ty)) {
804 // Records with non-trivial destructors/copy-constructors should not be
805 // passed by value.
806 if (auto RAA = getRecordArgABI(Ty, getCXXABI()))
807 return getNaturalAlignIndirect(Ty, RAA == CGCXXABI::RAA_DirectInMemory);
808 // Ignore empty structs/unions.
809 if (isEmptyRecord(getContext(), Ty, true))
810 return ABIArgInfo::getIgnore();
811 // Lower single-element structs to just pass a regular value. TODO: We
812 // could do reasonable-size multiple-element structs too, using getExpand(),
813 // though watch out for things like bitfields.
814 if (const Type *SeltTy = isSingleElementStruct(Ty, getContext()))
815 return ABIArgInfo::getDirect(CGT.ConvertType(QualType(SeltTy, 0)));
816 }
817
818 // Otherwise just do the default thing.
819 return defaultInfo.classifyArgumentType(Ty);
820 }
821
classifyReturnType(QualType RetTy) const822 ABIArgInfo WebAssemblyABIInfo::classifyReturnType(QualType RetTy) const {
823 if (isAggregateTypeForABI(RetTy)) {
824 // Records with non-trivial destructors/copy-constructors should not be
825 // returned by value.
826 if (!getRecordArgABI(RetTy, getCXXABI())) {
827 // Ignore empty structs/unions.
828 if (isEmptyRecord(getContext(), RetTy, true))
829 return ABIArgInfo::getIgnore();
830 // Lower single-element structs to just return a regular value. TODO: We
831 // could do reasonable-size multiple-element structs too, using
832 // ABIArgInfo::getDirect().
833 if (const Type *SeltTy = isSingleElementStruct(RetTy, getContext()))
834 return ABIArgInfo::getDirect(CGT.ConvertType(QualType(SeltTy, 0)));
835 }
836 }
837
838 // Otherwise just do the default thing.
839 return defaultInfo.classifyReturnType(RetTy);
840 }
841
EmitVAArg(CodeGenFunction & CGF,Address VAListAddr,QualType Ty) const842 Address WebAssemblyABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
843 QualType Ty) const {
844 bool IsIndirect = isAggregateTypeForABI(Ty) &&
845 !isEmptyRecord(getContext(), Ty, true) &&
846 !isSingleElementStruct(Ty, getContext());
847 return emitVoidPtrVAArg(CGF, VAListAddr, Ty, IsIndirect,
848 getContext().getTypeInfoInChars(Ty),
849 CharUnits::fromQuantity(4),
850 /*AllowHigherAlign=*/true);
851 }
852
853 //===----------------------------------------------------------------------===//
854 // le32/PNaCl bitcode ABI Implementation
855 //
856 // This is a simplified version of the x86_32 ABI. Arguments and return values
857 // are always passed on the stack.
858 //===----------------------------------------------------------------------===//
859
860 class PNaClABIInfo : public ABIInfo {
861 public:
PNaClABIInfo(CodeGen::CodeGenTypes & CGT)862 PNaClABIInfo(CodeGen::CodeGenTypes &CGT) : ABIInfo(CGT) {}
863
864 ABIArgInfo classifyReturnType(QualType RetTy) const;
865 ABIArgInfo classifyArgumentType(QualType RetTy) const;
866
867 void computeInfo(CGFunctionInfo &FI) const override;
868 Address EmitVAArg(CodeGenFunction &CGF,
869 Address VAListAddr, QualType Ty) const override;
870 };
871
872 class PNaClTargetCodeGenInfo : public TargetCodeGenInfo {
873 public:
PNaClTargetCodeGenInfo(CodeGen::CodeGenTypes & CGT)874 PNaClTargetCodeGenInfo(CodeGen::CodeGenTypes &CGT)
875 : TargetCodeGenInfo(new PNaClABIInfo(CGT)) {}
876 };
877
computeInfo(CGFunctionInfo & FI) const878 void PNaClABIInfo::computeInfo(CGFunctionInfo &FI) const {
879 if (!getCXXABI().classifyReturnType(FI))
880 FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
881
882 for (auto &I : FI.arguments())
883 I.info = classifyArgumentType(I.type);
884 }
885
EmitVAArg(CodeGenFunction & CGF,Address VAListAddr,QualType Ty) const886 Address PNaClABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
887 QualType Ty) const {
888 // The PNaCL ABI is a bit odd, in that varargs don't use normal
889 // function classification. Structs get passed directly for varargs
890 // functions, through a rewriting transform in
891 // pnacl-llvm/lib/Transforms/NaCl/ExpandVarArgs.cpp, which allows
892 // this target to actually support a va_arg instructions with an
893 // aggregate type, unlike other targets.
894 return EmitVAArgInstr(CGF, VAListAddr, Ty, ABIArgInfo::getDirect());
895 }
896
897 /// Classify argument of given type \p Ty.
classifyArgumentType(QualType Ty) const898 ABIArgInfo PNaClABIInfo::classifyArgumentType(QualType Ty) const {
899 if (isAggregateTypeForABI(Ty)) {
900 if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, getCXXABI()))
901 return getNaturalAlignIndirect(Ty, RAA == CGCXXABI::RAA_DirectInMemory);
902 return getNaturalAlignIndirect(Ty);
903 } else if (const EnumType *EnumTy = Ty->getAs<EnumType>()) {
904 // Treat an enum type as its underlying type.
905 Ty = EnumTy->getDecl()->getIntegerType();
906 } else if (Ty->isFloatingType()) {
907 // Floating-point types don't go inreg.
908 return ABIArgInfo::getDirect();
909 }
910
911 return (Ty->isPromotableIntegerType() ? ABIArgInfo::getExtend(Ty)
912 : ABIArgInfo::getDirect());
913 }
914
classifyReturnType(QualType RetTy) const915 ABIArgInfo PNaClABIInfo::classifyReturnType(QualType RetTy) const {
916 if (RetTy->isVoidType())
917 return ABIArgInfo::getIgnore();
918
919 // In the PNaCl ABI we always return records/structures on the stack.
920 if (isAggregateTypeForABI(RetTy))
921 return getNaturalAlignIndirect(RetTy);
922
923 // Treat an enum type as its underlying type.
924 if (const EnumType *EnumTy = RetTy->getAs<EnumType>())
925 RetTy = EnumTy->getDecl()->getIntegerType();
926
927 return (RetTy->isPromotableIntegerType() ? ABIArgInfo::getExtend(RetTy)
928 : ABIArgInfo::getDirect());
929 }
930
931 /// IsX86_MMXType - Return true if this is an MMX type.
IsX86_MMXType(llvm::Type * IRType)932 bool IsX86_MMXType(llvm::Type *IRType) {
933 // Return true if the type is an MMX type <2 x i32>, <4 x i16>, or <8 x i8>.
934 return IRType->isVectorTy() && IRType->getPrimitiveSizeInBits() == 64 &&
935 cast<llvm::VectorType>(IRType)->getElementType()->isIntegerTy() &&
936 IRType->getScalarSizeInBits() != 64;
937 }
938
X86AdjustInlineAsmType(CodeGen::CodeGenFunction & CGF,StringRef Constraint,llvm::Type * Ty)939 static llvm::Type* X86AdjustInlineAsmType(CodeGen::CodeGenFunction &CGF,
940 StringRef Constraint,
941 llvm::Type* Ty) {
942 bool IsMMXCons = llvm::StringSwitch<bool>(Constraint)
943 .Cases("y", "&y", "^Ym", true)
944 .Default(false);
945 if (IsMMXCons && Ty->isVectorTy()) {
946 if (cast<llvm::VectorType>(Ty)->getBitWidth() != 64) {
947 // Invalid MMX constraint
948 return nullptr;
949 }
950
951 return llvm::Type::getX86_MMXTy(CGF.getLLVMContext());
952 }
953
954 // No operation needed
955 return Ty;
956 }
957
958 /// Returns true if this type can be passed in SSE registers with the
959 /// X86_VectorCall calling convention. Shared between x86_32 and x86_64.
isX86VectorTypeForVectorCall(ASTContext & Context,QualType Ty)960 static bool isX86VectorTypeForVectorCall(ASTContext &Context, QualType Ty) {
961 if (const BuiltinType *BT = Ty->getAs<BuiltinType>()) {
962 if (BT->isFloatingPoint() && BT->getKind() != BuiltinType::Half) {
963 if (BT->getKind() == BuiltinType::LongDouble) {
964 if (&Context.getTargetInfo().getLongDoubleFormat() ==
965 &llvm::APFloat::x87DoubleExtended())
966 return false;
967 }
968 return true;
969 }
970 } else if (const VectorType *VT = Ty->getAs<VectorType>()) {
971 // vectorcall can pass XMM, YMM, and ZMM vectors. We don't pass SSE1 MMX
972 // registers specially.
973 unsigned VecSize = Context.getTypeSize(VT);
974 if (VecSize == 128 || VecSize == 256 || VecSize == 512)
975 return true;
976 }
977 return false;
978 }
979
980 /// Returns true if this aggregate is small enough to be passed in SSE registers
981 /// in the X86_VectorCall calling convention. Shared between x86_32 and x86_64.
isX86VectorCallAggregateSmallEnough(uint64_t NumMembers)982 static bool isX86VectorCallAggregateSmallEnough(uint64_t NumMembers) {
983 return NumMembers <= 4;
984 }
985
986 /// Returns a Homogeneous Vector Aggregate ABIArgInfo, used in X86.
getDirectX86Hva(llvm::Type * T=nullptr)987 static ABIArgInfo getDirectX86Hva(llvm::Type* T = nullptr) {
988 auto AI = ABIArgInfo::getDirect(T);
989 AI.setInReg(true);
990 AI.setCanBeFlattened(false);
991 return AI;
992 }
993
994 //===----------------------------------------------------------------------===//
995 // X86-32 ABI Implementation
996 //===----------------------------------------------------------------------===//
997
998 /// Similar to llvm::CCState, but for Clang.
999 struct CCState {
CCState__anonecf8f19a0111::CCState1000 CCState(CGFunctionInfo &FI)
1001 : IsPreassigned(FI.arg_size()), CC(FI.getCallingConvention()) {}
1002
1003 llvm::SmallBitVector IsPreassigned;
1004 unsigned CC = CallingConv::CC_C;
1005 unsigned FreeRegs = 0;
1006 unsigned FreeSSERegs = 0;
1007 };
1008
1009 enum {
1010 // Vectorcall only allows the first 6 parameters to be passed in registers.
1011 VectorcallMaxParamNumAsReg = 6
1012 };
1013
1014 /// X86_32ABIInfo - The X86-32 ABI information.
1015 class X86_32ABIInfo : public SwiftABIInfo {
1016 enum Class {
1017 Integer,
1018 Float
1019 };
1020
1021 static const unsigned MinABIStackAlignInBytes = 4;
1022
1023 bool IsDarwinVectorABI;
1024 bool IsRetSmallStructInRegABI;
1025 bool IsWin32StructABI;
1026 bool IsSoftFloatABI;
1027 bool IsMCUABI;
1028 unsigned DefaultNumRegisterParameters;
1029
isRegisterSize(unsigned Size)1030 static bool isRegisterSize(unsigned Size) {
1031 return (Size == 8 || Size == 16 || Size == 32 || Size == 64);
1032 }
1033
isHomogeneousAggregateBaseType(QualType Ty) const1034 bool isHomogeneousAggregateBaseType(QualType Ty) const override {
1035 // FIXME: Assumes vectorcall is in use.
1036 return isX86VectorTypeForVectorCall(getContext(), Ty);
1037 }
1038
isHomogeneousAggregateSmallEnough(const Type * Ty,uint64_t NumMembers) const1039 bool isHomogeneousAggregateSmallEnough(const Type *Ty,
1040 uint64_t NumMembers) const override {
1041 // FIXME: Assumes vectorcall is in use.
1042 return isX86VectorCallAggregateSmallEnough(NumMembers);
1043 }
1044
1045 bool shouldReturnTypeInRegister(QualType Ty, ASTContext &Context) const;
1046
1047 /// getIndirectResult - Give a source type \arg Ty, return a suitable result
1048 /// such that the argument will be passed in memory.
1049 ABIArgInfo getIndirectResult(QualType Ty, bool ByVal, CCState &State) const;
1050
1051 ABIArgInfo getIndirectReturnResult(QualType Ty, CCState &State) const;
1052
1053 /// Return the alignment to use for the given type on the stack.
1054 unsigned getTypeStackAlignInBytes(QualType Ty, unsigned Align) const;
1055
1056 Class classify(QualType Ty) const;
1057 ABIArgInfo classifyReturnType(QualType RetTy, CCState &State) const;
1058 ABIArgInfo classifyArgumentType(QualType RetTy, CCState &State) const;
1059
1060 /// Updates the number of available free registers, returns
1061 /// true if any registers were allocated.
1062 bool updateFreeRegs(QualType Ty, CCState &State) const;
1063
1064 bool shouldAggregateUseDirect(QualType Ty, CCState &State, bool &InReg,
1065 bool &NeedsPadding) const;
1066 bool shouldPrimitiveUseInReg(QualType Ty, CCState &State) const;
1067
1068 bool canExpandIndirectArgument(QualType Ty) const;
1069
1070 /// Rewrite the function info so that all memory arguments use
1071 /// inalloca.
1072 void rewriteWithInAlloca(CGFunctionInfo &FI) const;
1073
1074 void addFieldToArgStruct(SmallVector<llvm::Type *, 6> &FrameFields,
1075 CharUnits &StackOffset, ABIArgInfo &Info,
1076 QualType Type) const;
1077 void runVectorCallFirstPass(CGFunctionInfo &FI, CCState &State) const;
1078
1079 public:
1080
1081 void computeInfo(CGFunctionInfo &FI) const override;
1082 Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
1083 QualType Ty) const override;
1084
X86_32ABIInfo(CodeGen::CodeGenTypes & CGT,bool DarwinVectorABI,bool RetSmallStructInRegABI,bool Win32StructABI,unsigned NumRegisterParameters,bool SoftFloatABI)1085 X86_32ABIInfo(CodeGen::CodeGenTypes &CGT, bool DarwinVectorABI,
1086 bool RetSmallStructInRegABI, bool Win32StructABI,
1087 unsigned NumRegisterParameters, bool SoftFloatABI)
1088 : SwiftABIInfo(CGT), IsDarwinVectorABI(DarwinVectorABI),
1089 IsRetSmallStructInRegABI(RetSmallStructInRegABI),
1090 IsWin32StructABI(Win32StructABI),
1091 IsSoftFloatABI(SoftFloatABI),
1092 IsMCUABI(CGT.getTarget().getTriple().isOSIAMCU()),
1093 DefaultNumRegisterParameters(NumRegisterParameters) {}
1094
shouldPassIndirectlyForSwift(ArrayRef<llvm::Type * > scalars,bool asReturnValue) const1095 bool shouldPassIndirectlyForSwift(ArrayRef<llvm::Type*> scalars,
1096 bool asReturnValue) const override {
1097 // LLVM's x86-32 lowering currently only assigns up to three
1098 // integer registers and three fp registers. Oddly, it'll use up to
1099 // four vector registers for vectors, but those can overlap with the
1100 // scalar registers.
1101 return occupiesMoreThan(CGT, scalars, /*total*/ 3);
1102 }
1103
isSwiftErrorInRegister() const1104 bool isSwiftErrorInRegister() const override {
1105 // x86-32 lowering does not support passing swifterror in a register.
1106 return false;
1107 }
1108 };
1109
1110 class X86_32TargetCodeGenInfo : public TargetCodeGenInfo {
1111 public:
X86_32TargetCodeGenInfo(CodeGen::CodeGenTypes & CGT,bool DarwinVectorABI,bool RetSmallStructInRegABI,bool Win32StructABI,unsigned NumRegisterParameters,bool SoftFloatABI)1112 X86_32TargetCodeGenInfo(CodeGen::CodeGenTypes &CGT, bool DarwinVectorABI,
1113 bool RetSmallStructInRegABI, bool Win32StructABI,
1114 unsigned NumRegisterParameters, bool SoftFloatABI)
1115 : TargetCodeGenInfo(new X86_32ABIInfo(
1116 CGT, DarwinVectorABI, RetSmallStructInRegABI, Win32StructABI,
1117 NumRegisterParameters, SoftFloatABI)) {}
1118
1119 static bool isStructReturnInRegABI(
1120 const llvm::Triple &Triple, const CodeGenOptions &Opts);
1121
1122 void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
1123 CodeGen::CodeGenModule &CGM) const override;
1124
getDwarfEHStackPointer(CodeGen::CodeGenModule & CGM) const1125 int getDwarfEHStackPointer(CodeGen::CodeGenModule &CGM) const override {
1126 // Darwin uses different dwarf register numbers for EH.
1127 if (CGM.getTarget().getTriple().isOSDarwin()) return 5;
1128 return 4;
1129 }
1130
1131 bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
1132 llvm::Value *Address) const override;
1133
adjustInlineAsmType(CodeGen::CodeGenFunction & CGF,StringRef Constraint,llvm::Type * Ty) const1134 llvm::Type* adjustInlineAsmType(CodeGen::CodeGenFunction &CGF,
1135 StringRef Constraint,
1136 llvm::Type* Ty) const override {
1137 return X86AdjustInlineAsmType(CGF, Constraint, Ty);
1138 }
1139
1140 void addReturnRegisterOutputs(CodeGenFunction &CGF, LValue ReturnValue,
1141 std::string &Constraints,
1142 std::vector<llvm::Type *> &ResultRegTypes,
1143 std::vector<llvm::Type *> &ResultTruncRegTypes,
1144 std::vector<LValue> &ResultRegDests,
1145 std::string &AsmString,
1146 unsigned NumOutputs) const override;
1147
1148 llvm::Constant *
getUBSanFunctionSignature(CodeGen::CodeGenModule & CGM) const1149 getUBSanFunctionSignature(CodeGen::CodeGenModule &CGM) const override {
1150 unsigned Sig = (0xeb << 0) | // jmp rel8
1151 (0x06 << 8) | // .+0x08
1152 ('v' << 16) |
1153 ('2' << 24);
1154 return llvm::ConstantInt::get(CGM.Int32Ty, Sig);
1155 }
1156
getARCRetainAutoreleasedReturnValueMarker() const1157 StringRef getARCRetainAutoreleasedReturnValueMarker() const override {
1158 return "movl\t%ebp, %ebp"
1159 "\t\t// marker for objc_retainAutoreleaseReturnValue";
1160 }
1161 };
1162
1163 }
1164
1165 /// Rewrite input constraint references after adding some output constraints.
1166 /// In the case where there is one output and one input and we add one output,
1167 /// we need to replace all operand references greater than or equal to 1:
1168 /// mov $0, $1
1169 /// mov eax, $1
1170 /// The result will be:
1171 /// mov $0, $2
1172 /// mov eax, $2
rewriteInputConstraintReferences(unsigned FirstIn,unsigned NumNewOuts,std::string & AsmString)1173 static void rewriteInputConstraintReferences(unsigned FirstIn,
1174 unsigned NumNewOuts,
1175 std::string &AsmString) {
1176 std::string Buf;
1177 llvm::raw_string_ostream OS(Buf);
1178 size_t Pos = 0;
1179 while (Pos < AsmString.size()) {
1180 size_t DollarStart = AsmString.find('$', Pos);
1181 if (DollarStart == std::string::npos)
1182 DollarStart = AsmString.size();
1183 size_t DollarEnd = AsmString.find_first_not_of('$', DollarStart);
1184 if (DollarEnd == std::string::npos)
1185 DollarEnd = AsmString.size();
1186 OS << StringRef(&AsmString[Pos], DollarEnd - Pos);
1187 Pos = DollarEnd;
1188 size_t NumDollars = DollarEnd - DollarStart;
1189 if (NumDollars % 2 != 0 && Pos < AsmString.size()) {
1190 // We have an operand reference.
1191 size_t DigitStart = Pos;
1192 if (AsmString[DigitStart] == '{') {
1193 OS << '{';
1194 ++DigitStart;
1195 }
1196 size_t DigitEnd = AsmString.find_first_not_of("0123456789", DigitStart);
1197 if (DigitEnd == std::string::npos)
1198 DigitEnd = AsmString.size();
1199 StringRef OperandStr(&AsmString[DigitStart], DigitEnd - DigitStart);
1200 unsigned OperandIndex;
1201 if (!OperandStr.getAsInteger(10, OperandIndex)) {
1202 if (OperandIndex >= FirstIn)
1203 OperandIndex += NumNewOuts;
1204 OS << OperandIndex;
1205 } else {
1206 OS << OperandStr;
1207 }
1208 Pos = DigitEnd;
1209 }
1210 }
1211 AsmString = std::move(OS.str());
1212 }
1213
1214 /// Add output constraints for EAX:EDX because they are return registers.
addReturnRegisterOutputs(CodeGenFunction & CGF,LValue ReturnSlot,std::string & Constraints,std::vector<llvm::Type * > & ResultRegTypes,std::vector<llvm::Type * > & ResultTruncRegTypes,std::vector<LValue> & ResultRegDests,std::string & AsmString,unsigned NumOutputs) const1215 void X86_32TargetCodeGenInfo::addReturnRegisterOutputs(
1216 CodeGenFunction &CGF, LValue ReturnSlot, std::string &Constraints,
1217 std::vector<llvm::Type *> &ResultRegTypes,
1218 std::vector<llvm::Type *> &ResultTruncRegTypes,
1219 std::vector<LValue> &ResultRegDests, std::string &AsmString,
1220 unsigned NumOutputs) const {
1221 uint64_t RetWidth = CGF.getContext().getTypeSize(ReturnSlot.getType());
1222
1223 // Use the EAX constraint if the width is 32 or smaller and EAX:EDX if it is
1224 // larger.
1225 if (!Constraints.empty())
1226 Constraints += ',';
1227 if (RetWidth <= 32) {
1228 Constraints += "={eax}";
1229 ResultRegTypes.push_back(CGF.Int32Ty);
1230 } else {
1231 // Use the 'A' constraint for EAX:EDX.
1232 Constraints += "=A";
1233 ResultRegTypes.push_back(CGF.Int64Ty);
1234 }
1235
1236 // Truncate EAX or EAX:EDX to an integer of the appropriate size.
1237 llvm::Type *CoerceTy = llvm::IntegerType::get(CGF.getLLVMContext(), RetWidth);
1238 ResultTruncRegTypes.push_back(CoerceTy);
1239
1240 // Coerce the integer by bitcasting the return slot pointer.
1241 ReturnSlot.setAddress(CGF.Builder.CreateBitCast(ReturnSlot.getAddress(CGF),
1242 CoerceTy->getPointerTo()));
1243 ResultRegDests.push_back(ReturnSlot);
1244
1245 rewriteInputConstraintReferences(NumOutputs, 1, AsmString);
1246 }
1247
1248 /// shouldReturnTypeInRegister - Determine if the given type should be
1249 /// returned in a register (for the Darwin and MCU ABI).
shouldReturnTypeInRegister(QualType Ty,ASTContext & Context) const1250 bool X86_32ABIInfo::shouldReturnTypeInRegister(QualType Ty,
1251 ASTContext &Context) const {
1252 uint64_t Size = Context.getTypeSize(Ty);
1253
1254 // For i386, type must be register sized.
1255 // For the MCU ABI, it only needs to be <= 8-byte
1256 if ((IsMCUABI && Size > 64) || (!IsMCUABI && !isRegisterSize(Size)))
1257 return false;
1258
1259 if (Ty->isVectorType()) {
1260 // 64- and 128- bit vectors inside structures are not returned in
1261 // registers.
1262 if (Size == 64 || Size == 128)
1263 return false;
1264
1265 return true;
1266 }
1267
1268 // If this is a builtin, pointer, enum, complex type, member pointer, or
1269 // member function pointer it is ok.
1270 if (Ty->getAs<BuiltinType>() || Ty->hasPointerRepresentation() ||
1271 Ty->isAnyComplexType() || Ty->isEnumeralType() ||
1272 Ty->isBlockPointerType() || Ty->isMemberPointerType())
1273 return true;
1274
1275 // Arrays are treated like records.
1276 if (const ConstantArrayType *AT = Context.getAsConstantArrayType(Ty))
1277 return shouldReturnTypeInRegister(AT->getElementType(), Context);
1278
1279 // Otherwise, it must be a record type.
1280 const RecordType *RT = Ty->getAs<RecordType>();
1281 if (!RT) return false;
1282
1283 // FIXME: Traverse bases here too.
1284
1285 // Structure types are passed in register if all fields would be
1286 // passed in a register.
1287 for (const auto *FD : RT->getDecl()->fields()) {
1288 // Empty fields are ignored.
1289 if (isEmptyField(Context, FD, true))
1290 continue;
1291
1292 // Check fields recursively.
1293 if (!shouldReturnTypeInRegister(FD->getType(), Context))
1294 return false;
1295 }
1296 return true;
1297 }
1298
is32Or64BitBasicType(QualType Ty,ASTContext & Context)1299 static bool is32Or64BitBasicType(QualType Ty, ASTContext &Context) {
1300 // Treat complex types as the element type.
1301 if (const ComplexType *CTy = Ty->getAs<ComplexType>())
1302 Ty = CTy->getElementType();
1303
1304 // Check for a type which we know has a simple scalar argument-passing
1305 // convention without any padding. (We're specifically looking for 32
1306 // and 64-bit integer and integer-equivalents, float, and double.)
1307 if (!Ty->getAs<BuiltinType>() && !Ty->hasPointerRepresentation() &&
1308 !Ty->isEnumeralType() && !Ty->isBlockPointerType())
1309 return false;
1310
1311 uint64_t Size = Context.getTypeSize(Ty);
1312 return Size == 32 || Size == 64;
1313 }
1314
addFieldSizes(ASTContext & Context,const RecordDecl * RD,uint64_t & Size)1315 static bool addFieldSizes(ASTContext &Context, const RecordDecl *RD,
1316 uint64_t &Size) {
1317 for (const auto *FD : RD->fields()) {
1318 // Scalar arguments on the stack get 4 byte alignment on x86. If the
1319 // argument is smaller than 32-bits, expanding the struct will create
1320 // alignment padding.
1321 if (!is32Or64BitBasicType(FD->getType(), Context))
1322 return false;
1323
1324 // FIXME: Reject bit-fields wholesale; there are two problems, we don't know
1325 // how to expand them yet, and the predicate for telling if a bitfield still
1326 // counts as "basic" is more complicated than what we were doing previously.
1327 if (FD->isBitField())
1328 return false;
1329
1330 Size += Context.getTypeSize(FD->getType());
1331 }
1332 return true;
1333 }
1334
addBaseAndFieldSizes(ASTContext & Context,const CXXRecordDecl * RD,uint64_t & Size)1335 static bool addBaseAndFieldSizes(ASTContext &Context, const CXXRecordDecl *RD,
1336 uint64_t &Size) {
1337 // Don't do this if there are any non-empty bases.
1338 for (const CXXBaseSpecifier &Base : RD->bases()) {
1339 if (!addBaseAndFieldSizes(Context, Base.getType()->getAsCXXRecordDecl(),
1340 Size))
1341 return false;
1342 }
1343 if (!addFieldSizes(Context, RD, Size))
1344 return false;
1345 return true;
1346 }
1347
1348 /// Test whether an argument type which is to be passed indirectly (on the
1349 /// stack) would have the equivalent layout if it was expanded into separate
1350 /// arguments. If so, we prefer to do the latter to avoid inhibiting
1351 /// optimizations.
canExpandIndirectArgument(QualType Ty) const1352 bool X86_32ABIInfo::canExpandIndirectArgument(QualType Ty) const {
1353 // We can only expand structure types.
1354 const RecordType *RT = Ty->getAs<RecordType>();
1355 if (!RT)
1356 return false;
1357 const RecordDecl *RD = RT->getDecl();
1358 uint64_t Size = 0;
1359 if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD)) {
1360 if (!IsWin32StructABI) {
1361 // On non-Windows, we have to conservatively match our old bitcode
1362 // prototypes in order to be ABI-compatible at the bitcode level.
1363 if (!CXXRD->isCLike())
1364 return false;
1365 } else {
1366 // Don't do this for dynamic classes.
1367 if (CXXRD->isDynamicClass())
1368 return false;
1369 }
1370 if (!addBaseAndFieldSizes(getContext(), CXXRD, Size))
1371 return false;
1372 } else {
1373 if (!addFieldSizes(getContext(), RD, Size))
1374 return false;
1375 }
1376
1377 // We can do this if there was no alignment padding.
1378 return Size == getContext().getTypeSize(Ty);
1379 }
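// Illustrative examples (not from the source): 'struct Pt { int x, y; }' is
// 64 bits with no padding, so it can be expanded into two separate i32
// arguments; 'struct S { char c; int i; }' cannot, because 'char' is not a
// 32/64-bit basic type and expanding it would introduce alignment padding.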
1380
1381 ABIArgInfo X86_32ABIInfo::getIndirectReturnResult(QualType RetTy, CCState &State) const {
1382 // If the return value is indirect, then the hidden sret pointer argument
1383 // consumes one integer register.
1384 if (State.FreeRegs) {
1385 --State.FreeRegs;
1386 if (!IsMCUABI)
1387 return getNaturalAlignIndirectInReg(RetTy);
1388 }
1389 return getNaturalAlignIndirect(RetTy, /*ByVal=*/false);
1390 }
1391
1392 ABIArgInfo X86_32ABIInfo::classifyReturnType(QualType RetTy,
1393 CCState &State) const {
1394 if (RetTy->isVoidType())
1395 return ABIArgInfo::getIgnore();
1396
1397 const Type *Base = nullptr;
1398 uint64_t NumElts = 0;
1399 if ((State.CC == llvm::CallingConv::X86_VectorCall ||
1400 State.CC == llvm::CallingConv::X86_RegCall) &&
1401 isHomogeneousAggregate(RetTy, Base, NumElts)) {
1402 // The LLVM struct type for such an aggregate should lower properly.
1403 return ABIArgInfo::getDirect();
1404 }
1405
1406 if (const VectorType *VT = RetTy->getAs<VectorType>()) {
1407 // On Darwin, some vectors are returned in registers.
1408 if (IsDarwinVectorABI) {
1409 uint64_t Size = getContext().getTypeSize(RetTy);
1410
1411 // 128-bit vectors are a special case; they are returned in
1412 // registers and we need to make sure to pick a type the LLVM
1413 // backend will like.
1414 if (Size == 128)
1415 return ABIArgInfo::getDirect(llvm::VectorType::get(
1416 llvm::Type::getInt64Ty(getVMContext()), 2));
1417
1418 // Always return in register if it fits in a general purpose
1419 // register, or if it is 64 bits and has a single element.
1420 if ((Size == 8 || Size == 16 || Size == 32) ||
1421 (Size == 64 && VT->getNumElements() == 1))
1422 return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(),
1423 Size));
1424
1425 return getIndirectReturnResult(RetTy, State);
1426 }
1427
1428 return ABIArgInfo::getDirect();
1429 }
1430
1431 if (isAggregateTypeForABI(RetTy)) {
1432 if (const RecordType *RT = RetTy->getAs<RecordType>()) {
1433 // Structures with flexible arrays are always indirect.
1434 if (RT->getDecl()->hasFlexibleArrayMember())
1435 return getIndirectReturnResult(RetTy, State);
1436 }
1437
1438 // If specified, structs and unions are always indirect.
1439 if (!IsRetSmallStructInRegABI && !RetTy->isAnyComplexType())
1440 return getIndirectReturnResult(RetTy, State);
1441
1442 // Ignore empty structs/unions.
1443 if (isEmptyRecord(getContext(), RetTy, true))
1444 return ABIArgInfo::getIgnore();
1445
1446 // Small structures which are register sized are generally returned
1447 // in a register.
1448 if (shouldReturnTypeInRegister(RetTy, getContext())) {
1449 uint64_t Size = getContext().getTypeSize(RetTy);
1450
1451 // As a special-case, if the struct is a "single-element" struct, and
1452 // the field is of type "float" or "double", return it in a
1453 // floating-point register. (MSVC does not apply this special case.)
1454 // We apply a similar transformation for pointer types to improve the
1455 // quality of the generated IR.
1456 if (const Type *SeltTy = isSingleElementStruct(RetTy, getContext()))
1457 if ((!IsWin32StructABI && SeltTy->isRealFloatingType())
1458 || SeltTy->hasPointerRepresentation())
1459 return ABIArgInfo::getDirect(CGT.ConvertType(QualType(SeltTy, 0)));
1460
1461 // FIXME: We should be able to narrow this integer in cases with dead
1462 // padding.
1463 return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(), Size));
1464 }
1465
1466 return getIndirectReturnResult(RetTy, State);
1467 }
1468
1469 // Treat an enum type as its underlying type.
1470 if (const EnumType *EnumTy = RetTy->getAs<EnumType>())
1471 RetTy = EnumTy->getDecl()->getIntegerType();
1472
1473 return (RetTy->isPromotableIntegerType() ? ABIArgInfo::getExtend(RetTy)
1474 : ABIArgInfo::getDirect());
1475 }
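// For instance, with small-struct returns enabled on a non-Windows target,
// 'struct { float f; }' is returned directly as 'float' (i.e. in an FP
// register), while 'struct { short a, b; }' (32 bits in total) is coerced
// to a single i32 return value.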
1476
1477 static bool isSSEVectorType(ASTContext &Context, QualType Ty) {
1478 return Ty->getAs<VectorType>() && Context.getTypeSize(Ty) == 128;
1479 }
1480
1481 static bool isRecordWithSSEVectorType(ASTContext &Context, QualType Ty) {
1482 const RecordType *RT = Ty->getAs<RecordType>();
1483 if (!RT)
1484 return false;
1485 const RecordDecl *RD = RT->getDecl();
1486
1487 // If this is a C++ record, check the bases first.
1488 if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD))
1489 for (const auto &I : CXXRD->bases())
1490 if (!isRecordWithSSEVectorType(Context, I.getType()))
1491 return false;
1492
1493 for (const auto *i : RD->fields()) {
1494 QualType FT = i->getType();
1495
1496 if (isSSEVectorType(Context, FT))
1497 return true;
1498
1499 if (isRecordWithSSEVectorType(Context, FT))
1500 return true;
1501 }
1502
1503 return false;
1504 }
1505
1506 unsigned X86_32ABIInfo::getTypeStackAlignInBytes(QualType Ty,
1507 unsigned Align) const {
1508 // Otherwise, if the alignment is less than or equal to the minimum ABI
1509 // alignment, just use the default; the backend will handle this.
1510 if (Align <= MinABIStackAlignInBytes)
1511 return 0; // Use default alignment.
1512
1513 // On non-Darwin, the stack type alignment is always 4.
1514 if (!IsDarwinVectorABI) {
1515 // Set explicit alignment, since we may need to realign the top.
1516 return MinABIStackAlignInBytes;
1517 }
1518
1519 // Otherwise, if the type contains an SSE vector type, the alignment is 16.
1520 if (Align >= 16 && (isSSEVectorType(getContext(), Ty) ||
1521 isRecordWithSSEVectorType(getContext(), Ty)))
1522 return 16;
1523
1524 return MinABIStackAlignInBytes;
1525 }
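// E.g. on Darwin, a 16-byte-aligned struct containing an SSE vector member
// gets a 16-byte stack slot, while other over-aligned types fall back to
// the 4-byte minimum; on non-Darwin targets anything above the minimum is
// simply realigned to MinABIStackAlignInBytes.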
1526
1527 ABIArgInfo X86_32ABIInfo::getIndirectResult(QualType Ty, bool ByVal,
1528 CCState &State) const {
1529 if (!ByVal) {
1530 if (State.FreeRegs) {
1531 --State.FreeRegs; // Non-byval indirects just use one pointer.
1532 if (!IsMCUABI)
1533 return getNaturalAlignIndirectInReg(Ty);
1534 }
1535 return getNaturalAlignIndirect(Ty, false);
1536 }
1537
1538 // Compute the byval alignment.
1539 unsigned TypeAlign = getContext().getTypeAlign(Ty) / 8;
1540 unsigned StackAlign = getTypeStackAlignInBytes(Ty, TypeAlign);
1541 if (StackAlign == 0)
1542 return ABIArgInfo::getIndirect(CharUnits::fromQuantity(4), /*ByVal=*/true);
1543
1544 // If the stack alignment is less than the type alignment, realign the
1545 // argument.
1546 bool Realign = TypeAlign > StackAlign;
1547 return ABIArgInfo::getIndirect(CharUnits::fromQuantity(StackAlign),
1548 /*ByVal=*/true, Realign);
1549 }
1550
1551 X86_32ABIInfo::Class X86_32ABIInfo::classify(QualType Ty) const {
1552 const Type *T = isSingleElementStruct(Ty, getContext());
1553 if (!T)
1554 T = Ty.getTypePtr();
1555
1556 if (const BuiltinType *BT = T->getAs<BuiltinType>()) {
1557 BuiltinType::Kind K = BT->getKind();
1558 if (K == BuiltinType::Float || K == BuiltinType::Double)
1559 return Float;
1560 }
1561 return Integer;
1562 }
1563
1564 bool X86_32ABIInfo::updateFreeRegs(QualType Ty, CCState &State) const {
1565 if (!IsSoftFloatABI) {
1566 Class C = classify(Ty);
1567 if (C == Float)
1568 return false;
1569 }
1570
1571 unsigned Size = getContext().getTypeSize(Ty);
1572 unsigned SizeInRegs = (Size + 31) / 32;
1573
1574 if (SizeInRegs == 0)
1575 return false;
1576
1577 if (!IsMCUABI) {
1578 if (SizeInRegs > State.FreeRegs) {
1579 State.FreeRegs = 0;
1580 return false;
1581 }
1582 } else {
1583 // The MCU psABI allows passing parameters in-reg even if there are
1584 // earlier parameters that are passed on the stack. Also,
1585 // it does not allow passing >8-byte structs in-register,
1586 // even if there are 3 free registers available.
1587 if (SizeInRegs > State.FreeRegs || SizeInRegs > 2)
1588 return false;
1589 }
1590
1591 State.FreeRegs -= SizeInRegs;
1592 return true;
1593 }
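// For example, with -mregparm=3 a 'long long' argument (SizeInRegs == 2)
// consumes two of the three available registers; a later argument that no
// longer fits zeroes FreeRegs and is passed on the stack instead.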
1594
1595 bool X86_32ABIInfo::shouldAggregateUseDirect(QualType Ty, CCState &State,
1596 bool &InReg,
1597 bool &NeedsPadding) const {
1598 // On Windows, aggregates other than HFAs are never passed in registers, and
1599 // they do not consume register slots. Homogeneous floating-point aggregates
1600 // (HFAs) have already been dealt with at this point.
1601 if (IsWin32StructABI && isAggregateTypeForABI(Ty))
1602 return false;
1603
1604 NeedsPadding = false;
1605 InReg = !IsMCUABI;
1606
1607 if (!updateFreeRegs(Ty, State))
1608 return false;
1609
1610 if (IsMCUABI)
1611 return true;
1612
1613 if (State.CC == llvm::CallingConv::X86_FastCall ||
1614 State.CC == llvm::CallingConv::X86_VectorCall ||
1615 State.CC == llvm::CallingConv::X86_RegCall) {
1616 if (getContext().getTypeSize(Ty) <= 32 && State.FreeRegs)
1617 NeedsPadding = true;
1618
1619 return false;
1620 }
1621
1622 return true;
1623 }
1624
1625 bool X86_32ABIInfo::shouldPrimitiveUseInReg(QualType Ty, CCState &State) const {
1626 if (!updateFreeRegs(Ty, State))
1627 return false;
1628
1629 if (IsMCUABI)
1630 return false;
1631
1632 if (State.CC == llvm::CallingConv::X86_FastCall ||
1633 State.CC == llvm::CallingConv::X86_VectorCall ||
1634 State.CC == llvm::CallingConv::X86_RegCall) {
1635 if (getContext().getTypeSize(Ty) > 32)
1636 return false;
1637
1638 return (Ty->isIntegralOrEnumerationType() || Ty->isPointerType() ||
1639 Ty->isReferenceType());
1640 }
1641
1642 return true;
1643 }
1644
1645 void X86_32ABIInfo::runVectorCallFirstPass(CGFunctionInfo &FI, CCState &State) const {
1646 // Vectorcall on x86 works subtly differently than on x64, so the format is
1647 // a bit different from the x64 version. First, all vector types (not HVAs)
1648 // are assigned, with the first 6 ending up in the [XYZ]MM0-5 registers.
1649 // This differs from the x64 implementation, where the first 6 arguments by
1650 // index get registers.
1651 // In the second pass over the arguments, HVAs are passed in the remaining
1652 // vector registers if possible, or indirectly by address. The address will be
1653 // passed in ECX/EDX if available. Any other arguments are passed according to
1654 // the usual fastcall rules.
1655 MutableArrayRef<CGFunctionInfoArgInfo> Args = FI.arguments();
1656 for (int I = 0, E = Args.size(); I < E; ++I) {
1657 const Type *Base = nullptr;
1658 uint64_t NumElts = 0;
1659 const QualType &Ty = Args[I].type;
1660 if ((Ty->isVectorType() || Ty->isBuiltinType()) &&
1661 isHomogeneousAggregate(Ty, Base, NumElts)) {
1662 if (State.FreeSSERegs >= NumElts) {
1663 State.FreeSSERegs -= NumElts;
1664 Args[I].info = ABIArgInfo::getDirect();
1665 State.IsPreassigned.set(I);
1666 }
1667 }
1668 }
1669 }
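// Rough sketch of the effect: for 'void f(int i, __m128 v)' under
// __vectorcall, the first pass above assigns 'v' to XMM0 and marks it as
// preassigned; 'i' is classified in the normal pass and lands in ECX under
// the fastcall-style integer rules.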
1670
1671 ABIArgInfo X86_32ABIInfo::classifyArgumentType(QualType Ty,
1672 CCState &State) const {
1673 // FIXME: Set alignment on indirect arguments.
1674 bool IsFastCall = State.CC == llvm::CallingConv::X86_FastCall;
1675 bool IsRegCall = State.CC == llvm::CallingConv::X86_RegCall;
1676 bool IsVectorCall = State.CC == llvm::CallingConv::X86_VectorCall;
1677
1678 Ty = useFirstFieldIfTransparentUnion(Ty);
1679
1680 // Check with the C++ ABI first.
1681 const RecordType *RT = Ty->getAs<RecordType>();
1682 if (RT) {
1683 CGCXXABI::RecordArgABI RAA = getRecordArgABI(RT, getCXXABI());
1684 if (RAA == CGCXXABI::RAA_Indirect) {
1685 return getIndirectResult(Ty, false, State);
1686 } else if (RAA == CGCXXABI::RAA_DirectInMemory) {
1687 // The field index doesn't matter; we'll fix it up later.
1688 return ABIArgInfo::getInAlloca(/*FieldIndex=*/0);
1689 }
1690 }
1691
1692 // Regcall uses the concept of a homogeneous vector aggregate, similar
1693 // to other targets.
1694 const Type *Base = nullptr;
1695 uint64_t NumElts = 0;
1696 if ((IsRegCall || IsVectorCall) &&
1697 isHomogeneousAggregate(Ty, Base, NumElts)) {
1698 if (State.FreeSSERegs >= NumElts) {
1699 State.FreeSSERegs -= NumElts;
1700
1701 // Vectorcall passes HVAs directly and does not flatten them, but regcall
1702 // does.
1703 if (IsVectorCall)
1704 return getDirectX86Hva();
1705
1706 if (Ty->isBuiltinType() || Ty->isVectorType())
1707 return ABIArgInfo::getDirect();
1708 return ABIArgInfo::getExpand();
1709 }
1710 return getIndirectResult(Ty, /*ByVal=*/false, State);
1711 }
1712
1713 if (isAggregateTypeForABI(Ty)) {
1714 // Structures with flexible arrays are always indirect.
1715 // FIXME: This should not be byval!
1716 if (RT && RT->getDecl()->hasFlexibleArrayMember())
1717 return getIndirectResult(Ty, true, State);
1718
1719 // Ignore empty structs/unions on non-Windows.
1720 if (!IsWin32StructABI && isEmptyRecord(getContext(), Ty, true))
1721 return ABIArgInfo::getIgnore();
1722
1723 llvm::LLVMContext &LLVMContext = getVMContext();
1724 llvm::IntegerType *Int32 = llvm::Type::getInt32Ty(LLVMContext);
1725 bool NeedsPadding = false;
1726 bool InReg;
1727 if (shouldAggregateUseDirect(Ty, State, InReg, NeedsPadding)) {
1728 unsigned SizeInRegs = (getContext().getTypeSize(Ty) + 31) / 32;
1729 SmallVector<llvm::Type*, 3> Elements(SizeInRegs, Int32);
1730 llvm::Type *Result = llvm::StructType::get(LLVMContext, Elements);
1731 if (InReg)
1732 return ABIArgInfo::getDirectInReg(Result);
1733 else
1734 return ABIArgInfo::getDirect(Result);
1735 }
1736 llvm::IntegerType *PaddingType = NeedsPadding ? Int32 : nullptr;
1737
1738 // Expand small (<= 128-bit) record types when we know that the stack layout
1739 // of those arguments will match the struct. This is important because the
1740 // LLVM backend isn't smart enough to remove byval, which inhibits many
1741 // optimizations.
1742 // Don't do this for the MCU if there are still free integer registers
1743 // (see X86_64 ABI for full explanation).
1744 if (getContext().getTypeSize(Ty) <= 4 * 32 &&
1745 (!IsMCUABI || State.FreeRegs == 0) && canExpandIndirectArgument(Ty))
1746 return ABIArgInfo::getExpandWithPadding(
1747 IsFastCall || IsVectorCall || IsRegCall, PaddingType);
1748
1749 return getIndirectResult(Ty, true, State);
1750 }
1751
1752 if (const VectorType *VT = Ty->getAs<VectorType>()) {
1753 // On Darwin, some vectors are passed in memory; we handle this by passing
1754 // the vector as an i8/i16/i32/i64.
1755 if (IsDarwinVectorABI) {
1756 uint64_t Size = getContext().getTypeSize(Ty);
1757 if ((Size == 8 || Size == 16 || Size == 32) ||
1758 (Size == 64 && VT->getNumElements() == 1))
1759 return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(),
1760 Size));
1761 }
1762
1763 if (IsX86_MMXType(CGT.ConvertType(Ty)))
1764 return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(), 64));
1765
1766 return ABIArgInfo::getDirect();
1767 }
1768
1769
1770 if (const EnumType *EnumTy = Ty->getAs<EnumType>())
1771 Ty = EnumTy->getDecl()->getIntegerType();
1772
1773 bool InReg = shouldPrimitiveUseInReg(Ty, State);
1774
1775 if (Ty->isPromotableIntegerType()) {
1776 if (InReg)
1777 return ABIArgInfo::getExtendInReg(Ty);
1778 return ABIArgInfo::getExtend(Ty);
1779 }
1780
1781 if (InReg)
1782 return ABIArgInfo::getDirectInReg();
1783 return ABIArgInfo::getDirect();
1784 }
1785
1786 void X86_32ABIInfo::computeInfo(CGFunctionInfo &FI) const {
1787 CCState State(FI);
1788 if (IsMCUABI)
1789 State.FreeRegs = 3;
1790 else if (State.CC == llvm::CallingConv::X86_FastCall)
1791 State.FreeRegs = 2;
1792 else if (State.CC == llvm::CallingConv::X86_VectorCall) {
1793 State.FreeRegs = 2;
1794 State.FreeSSERegs = 6;
1795 } else if (FI.getHasRegParm())
1796 State.FreeRegs = FI.getRegParm();
1797 else if (State.CC == llvm::CallingConv::X86_RegCall) {
1798 State.FreeRegs = 5;
1799 State.FreeSSERegs = 8;
1800 } else
1801 State.FreeRegs = DefaultNumRegisterParameters;
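// Roughly, the budgets above correspond to: ECX/EDX for fastcall and
// vectorcall (plus XMM0-5 for the latter), EAX/EDX/ECX for the MCU psABI,
// and five GPRs plus XMM0-7 for regcall.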
1802
1803 if (!::classifyReturnType(getCXXABI(), FI, *this)) {
1804 FI.getReturnInfo() = classifyReturnType(FI.getReturnType(), State);
1805 } else if (FI.getReturnInfo().isIndirect()) {
1806 // The C++ ABI is not aware of register usage, so we have to check if the
1807 // return value was sret and put it in a register ourselves if appropriate.
1808 if (State.FreeRegs) {
1809 --State.FreeRegs; // The sret parameter consumes a register.
1810 if (!IsMCUABI)
1811 FI.getReturnInfo().setInReg(true);
1812 }
1813 }
1814
1815 // The chain argument effectively gives us another free register.
1816 if (FI.isChainCall())
1817 ++State.FreeRegs;
1818
1819 // For vectorcall, do a first pass over the arguments, assigning FP and vector
1820 // arguments to XMM registers as available.
1821 if (State.CC == llvm::CallingConv::X86_VectorCall)
1822 runVectorCallFirstPass(FI, State);
1823
1824 bool UsedInAlloca = false;
1825 MutableArrayRef<CGFunctionInfoArgInfo> Args = FI.arguments();
1826 for (int I = 0, E = Args.size(); I < E; ++I) {
1827 // Skip arguments that have already been assigned.
1828 if (State.IsPreassigned.test(I))
1829 continue;
1830
1831 Args[I].info = classifyArgumentType(Args[I].type, State);
1832 UsedInAlloca |= (Args[I].info.getKind() == ABIArgInfo::InAlloca);
1833 }
1834
1835 // If we needed to use inalloca for any argument, do a second pass and rewrite
1836 // all the memory arguments to use inalloca.
1837 if (UsedInAlloca)
1838 rewriteWithInAlloca(FI);
1839 }
1840
1841 void
1842 X86_32ABIInfo::addFieldToArgStruct(SmallVector<llvm::Type *, 6> &FrameFields,
1843 CharUnits &StackOffset, ABIArgInfo &Info,
1844 QualType Type) const {
1845 // Arguments are always 4-byte-aligned.
1846 CharUnits FieldAlign = CharUnits::fromQuantity(4);
1847
1848 assert(StackOffset.isMultipleOf(FieldAlign) && "unaligned inalloca struct");
1849 Info = ABIArgInfo::getInAlloca(FrameFields.size());
1850 FrameFields.push_back(CGT.ConvertTypeForMem(Type));
1851 StackOffset += getContext().getTypeSizeInChars(Type);
1852
1853 // Insert padding bytes to respect alignment.
1854 CharUnits FieldEnd = StackOffset;
1855 StackOffset = FieldEnd.alignTo(FieldAlign);
1856 if (StackOffset != FieldEnd) {
1857 CharUnits NumBytes = StackOffset - FieldEnd;
1858 llvm::Type *Ty = llvm::Type::getInt8Ty(getVMContext());
1859 Ty = llvm::ArrayType::get(Ty, NumBytes.getQuantity());
1860 FrameFields.push_back(Ty);
1861 }
1862 }
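// E.g. appending a 1-byte field at offset 0 advances StackOffset to 1 and
// then pads the frame with a [3 x i8] array so the next field starts at
// offset 4.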
1863
1864 static bool isArgInAlloca(const ABIArgInfo &Info) {
1865 // Leave ignored and inreg arguments alone.
1866 switch (Info.getKind()) {
1867 case ABIArgInfo::InAlloca:
1868 return true;
1869 case ABIArgInfo::Indirect:
1870 assert(Info.getIndirectByVal());
1871 return true;
1872 case ABIArgInfo::Ignore:
1873 return false;
1874 case ABIArgInfo::Direct:
1875 case ABIArgInfo::Extend:
1876 if (Info.getInReg())
1877 return false;
1878 return true;
1879 case ABIArgInfo::Expand:
1880 case ABIArgInfo::CoerceAndExpand:
1881 // These are aggregate types which are never passed in registers when
1882 // inalloca is involved.
1883 return true;
1884 }
1885 llvm_unreachable("invalid enum");
1886 }
1887
1888 void X86_32ABIInfo::rewriteWithInAlloca(CGFunctionInfo &FI) const {
1889 assert(IsWin32StructABI && "inalloca only supported on win32");
1890
1891 // Build a packed struct type for all of the arguments in memory.
1892 SmallVector<llvm::Type *, 6> FrameFields;
1893
1894 // The stack alignment is always 4.
1895 CharUnits StackAlign = CharUnits::fromQuantity(4);
1896
1897 CharUnits StackOffset;
1898 CGFunctionInfo::arg_iterator I = FI.arg_begin(), E = FI.arg_end();
1899
1900 // Put 'this' into the struct before 'sret', if necessary.
1901 bool IsThisCall =
1902 FI.getCallingConvention() == llvm::CallingConv::X86_ThisCall;
1903 ABIArgInfo &Ret = FI.getReturnInfo();
1904 if (Ret.isIndirect() && Ret.isSRetAfterThis() && !IsThisCall &&
1905 isArgInAlloca(I->info)) {
1906 addFieldToArgStruct(FrameFields, StackOffset, I->info, I->type);
1907 ++I;
1908 }
1909
1910 // Put the sret parameter into the inalloca struct if it's in memory.
1911 if (Ret.isIndirect() && !Ret.getInReg()) {
1912 CanQualType PtrTy = getContext().getPointerType(FI.getReturnType());
1913 addFieldToArgStruct(FrameFields, StackOffset, Ret, PtrTy);
1914 // On Windows, the hidden sret parameter is always returned in eax.
1915 Ret.setInAllocaSRet(IsWin32StructABI);
1916 }
1917
1918 // Skip the 'this' parameter in ecx.
1919 if (IsThisCall)
1920 ++I;
1921
1922 // Put arguments passed in memory into the struct.
1923 for (; I != E; ++I) {
1924 if (isArgInAlloca(I->info))
1925 addFieldToArgStruct(FrameFields, StackOffset, I->info, I->type);
1926 }
1927
1928 FI.setArgStruct(llvm::StructType::get(getVMContext(), FrameFields,
1929 /*isPacked=*/true),
1930 StackAlign);
1931 }
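// Sketch of the result (illustrative names): for 'void f(NonTrivial a,
// int b)' on win32, both arguments end up in a packed frame struct
// <{ %struct.NonTrivial, i32 }> that the caller materializes and passes
// with the inalloca attribute.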
1932
1933 Address X86_32ABIInfo::EmitVAArg(CodeGenFunction &CGF,
1934 Address VAListAddr, QualType Ty) const {
1935
1936 auto TypeInfo = getContext().getTypeInfoInChars(Ty);
1937
1938 // x86-32 changes the alignment of certain arguments on the stack.
1939 //
1940 // Just messing with TypeInfo like this works because we never pass
1941 // anything indirectly.
1942 TypeInfo.second = CharUnits::fromQuantity(
1943 getTypeStackAlignInBytes(Ty, TypeInfo.second.getQuantity()));
1944
1945 return emitVoidPtrVAArg(CGF, VAListAddr, Ty, /*Indirect*/ false,
1946 TypeInfo, CharUnits::fromQuantity(4),
1947 /*AllowHigherAlign*/ true);
1948 }
1949
1950 bool X86_32TargetCodeGenInfo::isStructReturnInRegABI(
1951 const llvm::Triple &Triple, const CodeGenOptions &Opts) {
1952 assert(Triple.getArch() == llvm::Triple::x86);
1953
1954 switch (Opts.getStructReturnConvention()) {
1955 case CodeGenOptions::SRCK_Default:
1956 break;
1957 case CodeGenOptions::SRCK_OnStack: // -fpcc-struct-return
1958 return false;
1959 case CodeGenOptions::SRCK_InRegs: // -freg-struct-return
1960 return true;
1961 }
1962
1963 if (Triple.isOSDarwin() || Triple.isOSIAMCU())
1964 return true;
1965
1966 switch (Triple.getOS()) {
1967 case llvm::Triple::DragonFly:
1968 case llvm::Triple::FreeBSD:
1969 case llvm::Triple::OpenBSD:
1970 case llvm::Triple::Win32:
1971 return true;
1972 default:
1973 return false;
1974 }
1975 }
1976
1977 void X86_32TargetCodeGenInfo::setTargetAttributes(
1978 const Decl *D, llvm::GlobalValue *GV, CodeGen::CodeGenModule &CGM) const {
1979 if (GV->isDeclaration())
1980 return;
1981 if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D)) {
1982 if (FD->hasAttr<X86ForceAlignArgPointerAttr>()) {
1983 llvm::Function *Fn = cast<llvm::Function>(GV);
1984 Fn->addFnAttr("stackrealign");
1985 }
1986 if (FD->hasAttr<AnyX86InterruptAttr>()) {
1987 llvm::Function *Fn = cast<llvm::Function>(GV);
1988 Fn->setCallingConv(llvm::CallingConv::X86_INTR);
1989 }
1990 }
1991 }
1992
1993 bool X86_32TargetCodeGenInfo::initDwarfEHRegSizeTable(
1994 CodeGen::CodeGenFunction &CGF,
1995 llvm::Value *Address) const {
1996 CodeGen::CGBuilderTy &Builder = CGF.Builder;
1997
1998 llvm::Value *Four8 = llvm::ConstantInt::get(CGF.Int8Ty, 4);
1999
2000 // 0-7 are the eight integer registers; the order is different
2001 // on Darwin (for EH), but the range is the same.
2002 // 8 is %eip.
2003 AssignToArrayRange(Builder, Address, Four8, 0, 8);
2004
2005 if (CGF.CGM.getTarget().getTriple().isOSDarwin()) {
2006 // 12-16 are st(0..4). Not sure why we stop at 4.
2007 // These have size 16, which is sizeof(long double) on
2008 // platforms with 8-byte alignment for that type.
2009 llvm::Value *Sixteen8 = llvm::ConstantInt::get(CGF.Int8Ty, 16);
2010 AssignToArrayRange(Builder, Address, Sixteen8, 12, 16);
2011
2012 } else {
2013 // 9 is %eflags, which doesn't get a size on Darwin for some
2014 // reason.
2015 Builder.CreateAlignedStore(
2016 Four8, Builder.CreateConstInBoundsGEP1_32(CGF.Int8Ty, Address, 9),
2017 CharUnits::One());
2018
2019 // 11-16 are st(0..5). Not sure why we stop at 5.
2020 // These have size 12, which is sizeof(long double) on
2021 // platforms with 4-byte alignment for that type.
2022 llvm::Value *Twelve8 = llvm::ConstantInt::get(CGF.Int8Ty, 12);
2023 AssignToArrayRange(Builder, Address, Twelve8, 11, 16);
2024 }
2025
2026 return false;
2027 }
2028
2029 //===----------------------------------------------------------------------===//
2030 // X86-64 ABI Implementation
2031 //===----------------------------------------------------------------------===//
2032
2033
2034 namespace {
2035 /// The AVX ABI level for X86 targets.
2036 enum class X86AVXABILevel {
2037 None,
2038 AVX,
2039 AVX512
2040 };
2041
2042 /// \returns the size in bits of the largest (native) vector for \p AVXLevel.
2043 static unsigned getNativeVectorSizeForAVXABI(X86AVXABILevel AVXLevel) {
2044 switch (AVXLevel) {
2045 case X86AVXABILevel::AVX512:
2046 return 512;
2047 case X86AVXABILevel::AVX:
2048 return 256;
2049 case X86AVXABILevel::None:
2050 return 128;
2051 }
2052 llvm_unreachable("Unknown AVXLevel");
2053 }
2054
2055 /// X86_64ABIInfo - The X86_64 ABI information.
2056 class X86_64ABIInfo : public SwiftABIInfo {
2057 enum Class {
2058 Integer = 0,
2059 SSE,
2060 SSEUp,
2061 X87,
2062 X87Up,
2063 ComplexX87,
2064 NoClass,
2065 Memory
2066 };
2067
2068 /// merge - Implement the X86_64 ABI merging algorithm.
2069 ///
2070 /// Merge an accumulating classification \arg Accum with a field
2071 /// classification \arg Field.
2072 ///
2073 /// \param Accum - The accumulating classification. This should
2074 /// always be either NoClass or the result of a previous merge
2075 /// call. In addition, this should never be Memory (the caller
2076 /// should just return Memory for the aggregate).
2077 static Class merge(Class Accum, Class Field);
2078
2079 /// postMerge - Implement the X86_64 ABI post merging algorithm.
2080 ///
2081 /// Post merger cleanup, reduces a malformed Hi and Lo pair to
2082 /// final MEMORY or SSE classes when necessary.
2083 ///
2084 /// \param AggregateSize - The size of the current aggregate in
2085 /// the classification process.
2086 ///
2087 /// \param Lo - The classification for the parts of the type
2088 /// residing in the low word of the containing object.
2089 ///
2090 /// \param Hi - The classification for the parts of the type
2091 /// residing in the higher words of the containing object.
2092 ///
2093 void postMerge(unsigned AggregateSize, Class &Lo, Class &Hi) const;
2094
2095 /// classify - Determine the x86_64 register classes in which the
2096 /// given type T should be passed.
2097 ///
2098 /// \param Lo - The classification for the parts of the type
2099 /// residing in the low word of the containing object.
2100 ///
2101 /// \param Hi - The classification for the parts of the type
2102 /// residing in the high word of the containing object.
2103 ///
2104 /// \param OffsetBase - The bit offset of this type in the
2105 /// containing object. Some parameters are classified differently
2106 /// depending on whether they straddle an eightbyte boundary.
2107 ///
2108 /// \param isNamedArg - Whether the argument in question is a "named"
2109 /// argument, as used in AMD64-ABI 3.5.7.
2110 ///
2111 /// If a word is unused its result will be NoClass; if a type should
2112 /// be passed in Memory then at least the classification of \arg Lo
2113 /// will be Memory.
2114 ///
2115 /// The \arg Lo class will be NoClass iff the argument is ignored.
2116 ///
2117 /// If the \arg Lo class is ComplexX87, then the \arg Hi class will
2118 /// also be ComplexX87.
2119 void classify(QualType T, uint64_t OffsetBase, Class &Lo, Class &Hi,
2120 bool isNamedArg) const;
2121
2122 llvm::Type *GetByteVectorType(QualType Ty) const;
2123 llvm::Type *GetSSETypeAtOffset(llvm::Type *IRType,
2124 unsigned IROffset, QualType SourceTy,
2125 unsigned SourceOffset) const;
2126 llvm::Type *GetINTEGERTypeAtOffset(llvm::Type *IRType,
2127 unsigned IROffset, QualType SourceTy,
2128 unsigned SourceOffset) const;
2129
2130 /// getIndirectReturnResult - Given a source type \arg Ty, return a suitable
2131 /// result such that the argument will be returned in memory.
2132 ABIArgInfo getIndirectReturnResult(QualType Ty) const;
2133
2134 /// getIndirectResult - Given a source type \arg Ty, return a suitable result
2135 /// such that the argument will be passed in memory.
2136 ///
2137 /// \param freeIntRegs - The number of free integer registers remaining
2138 /// available.
2139 ABIArgInfo getIndirectResult(QualType Ty, unsigned freeIntRegs) const;
2140
2141 ABIArgInfo classifyReturnType(QualType RetTy) const;
2142
2143 ABIArgInfo classifyArgumentType(QualType Ty, unsigned freeIntRegs,
2144 unsigned &neededInt, unsigned &neededSSE,
2145 bool isNamedArg) const;
2146
2147 ABIArgInfo classifyRegCallStructType(QualType Ty, unsigned &NeededInt,
2148 unsigned &NeededSSE) const;
2149
2150 ABIArgInfo classifyRegCallStructTypeImpl(QualType Ty, unsigned &NeededInt,
2151 unsigned &NeededSSE) const;
2152
2153 bool IsIllegalVectorType(QualType Ty) const;
2154
2155 /// The 0.98 ABI revision clarified a lot of ambiguities,
2156 /// unfortunately in ways that were not always consistent with
2157 /// certain previous compilers. In particular, platforms which
2158 /// required strict binary compatibility with older versions of GCC
2159 /// may need to exempt themselves.
2160 bool honorsRevision0_98() const {
2161 return !getTarget().getTriple().isOSDarwin();
2162 }
2163
2164 /// GCC classifies <1 x long long> as SSE but some platform ABIs choose to
2165 /// classify it as INTEGER (for compatibility with older clang compilers).
2166 bool classifyIntegerMMXAsSSE() const {
2167 // Clang <= 3.8 did not do this.
2168 if (getContext().getLangOpts().getClangABICompat() <=
2169 LangOptions::ClangABI::Ver3_8)
2170 return false;
2171
2172 const llvm::Triple &Triple = getTarget().getTriple();
2173 if (Triple.isOSDarwin() || Triple.getOS() == llvm::Triple::PS4)
2174 return false;
2175 if (Triple.isOSFreeBSD() && Triple.getOSMajorVersion() >= 10)
2176 return false;
2177 return true;
2178 }
2179
2180 // GCC classifies vectors of __int128 as memory.
2181 bool passInt128VectorsInMem() const {
2182 // Clang <= 9.0 did not do this.
2183 if (getContext().getLangOpts().getClangABICompat() <=
2184 LangOptions::ClangABI::Ver9)
2185 return false;
2186
2187 const llvm::Triple &T = getTarget().getTriple();
2188 return T.isOSLinux() || T.isOSNetBSD();
2189 }
2190
2191 X86AVXABILevel AVXLevel;
2192 // Some ABIs (e.g. the X32 ABI and Native Client OS) use 32-bit pointers on
2193 // 64-bit hardware.
2194 bool Has64BitPointers;
2195
2196 public:
2197 X86_64ABIInfo(CodeGen::CodeGenTypes &CGT, X86AVXABILevel AVXLevel) :
2198 SwiftABIInfo(CGT), AVXLevel(AVXLevel),
2199 Has64BitPointers(CGT.getDataLayout().getPointerSize(0) == 8) {
2200 }
2201
2202 bool isPassedUsingAVXType(QualType type) const {
2203 unsigned neededInt, neededSSE;
2204 // The freeIntRegs argument doesn't matter here.
2205 ABIArgInfo info = classifyArgumentType(type, 0, neededInt, neededSSE,
2206 /*isNamedArg*/true);
2207 if (info.isDirect()) {
2208 llvm::Type *ty = info.getCoerceToType();
2209 if (llvm::VectorType *vectorTy = dyn_cast_or_null<llvm::VectorType>(ty))
2210 return (vectorTy->getBitWidth() > 128);
2211 }
2212 return false;
2213 }
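// For example, a named __m256 argument is classified (SSE, SSEUP) and
// coerced to a 256-bit IR vector, so this returns true; an __m128 argument
// stays at 128 bits and does not count as an AVX type here.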
2214
2215 void computeInfo(CGFunctionInfo &FI) const override;
2216
2217 Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
2218 QualType Ty) const override;
2219 Address EmitMSVAArg(CodeGenFunction &CGF, Address VAListAddr,
2220 QualType Ty) const override;
2221
2222 bool has64BitPointers() const {
2223 return Has64BitPointers;
2224 }
2225
2226 bool shouldPassIndirectlyForSwift(ArrayRef<llvm::Type*> scalars,
2227 bool asReturnValue) const override {
2228 return occupiesMoreThan(CGT, scalars, /*total*/ 4);
2229 }
2230 bool isSwiftErrorInRegister() const override {
2231 return true;
2232 }
2233 };
2234
2235 /// WinX86_64ABIInfo - The Windows X86_64 ABI information.
2236 class WinX86_64ABIInfo : public SwiftABIInfo {
2237 public:
2238 WinX86_64ABIInfo(CodeGen::CodeGenTypes &CGT, X86AVXABILevel AVXLevel)
2239 : SwiftABIInfo(CGT), AVXLevel(AVXLevel),
2240 IsMingw64(getTarget().getTriple().isWindowsGNUEnvironment()) {}
2241
2242 void computeInfo(CGFunctionInfo &FI) const override;
2243
2244 Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
2245 QualType Ty) const override;
2246
2247 bool isHomogeneousAggregateBaseType(QualType Ty) const override {
2248 // FIXME: Assumes vectorcall is in use.
2249 return isX86VectorTypeForVectorCall(getContext(), Ty);
2250 }
2251
2252 bool isHomogeneousAggregateSmallEnough(const Type *Ty,
2253 uint64_t NumMembers) const override {
2254 // FIXME: Assumes vectorcall is in use.
2255 return isX86VectorCallAggregateSmallEnough(NumMembers);
2256 }
2257
2258 bool shouldPassIndirectlyForSwift(ArrayRef<llvm::Type *> scalars,
2259 bool asReturnValue) const override {
2260 return occupiesMoreThan(CGT, scalars, /*total*/ 4);
2261 }
2262
2263 bool isSwiftErrorInRegister() const override {
2264 return true;
2265 }
2266
2267 private:
2268 ABIArgInfo classify(QualType Ty, unsigned &FreeSSERegs, bool IsReturnType,
2269 bool IsVectorCall, bool IsRegCall) const;
2270 ABIArgInfo reclassifyHvaArgType(QualType Ty, unsigned &FreeSSERegs,
2271 const ABIArgInfo &current) const;
2272 void computeVectorCallArgs(CGFunctionInfo &FI, unsigned FreeSSERegs,
2273 bool IsVectorCall, bool IsRegCall) const;
2274
2275 X86AVXABILevel AVXLevel;
2276
2277 bool IsMingw64;
2278 };
2279
2280 class X86_64TargetCodeGenInfo : public TargetCodeGenInfo {
2281 public:
2282 X86_64TargetCodeGenInfo(CodeGen::CodeGenTypes &CGT, X86AVXABILevel AVXLevel)
2283 : TargetCodeGenInfo(new X86_64ABIInfo(CGT, AVXLevel)) {}
2284
2285 const X86_64ABIInfo &getABIInfo() const {
2286 return static_cast<const X86_64ABIInfo&>(TargetCodeGenInfo::getABIInfo());
2287 }
2288
2289 /// Disable tail call on x86-64. The epilogue code before the tail jump blocks
2290 /// the autoreleaseRV/retainRV optimization.
2291 bool shouldSuppressTailCallsOfRetainAutoreleasedReturnValue() const override {
2292 return true;
2293 }
2294
2295 int getDwarfEHStackPointer(CodeGen::CodeGenModule &CGM) const override {
2296 return 7;
2297 }
2298
2299 bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
2300 llvm::Value *Address) const override {
2301 llvm::Value *Eight8 = llvm::ConstantInt::get(CGF.Int8Ty, 8);
2302
2303 // 0-15 are the 16 integer registers.
2304 // 16 is %rip.
2305 AssignToArrayRange(CGF.Builder, Address, Eight8, 0, 16);
2306 return false;
2307 }
2308
2309 llvm::Type* adjustInlineAsmType(CodeGen::CodeGenFunction &CGF,
2310 StringRef Constraint,
2311 llvm::Type* Ty) const override {
2312 return X86AdjustInlineAsmType(CGF, Constraint, Ty);
2313 }
2314
2315 bool isNoProtoCallVariadic(const CallArgList &args,
2316 const FunctionNoProtoType *fnType) const override {
2317 // The default CC on x86-64 sets %al to the number of SSE
2318 // registers used, and GCC sets this when calling an unprototyped
2319 // function, so we override the default behavior. However, don't do
2320 // that when AVX types are involved: the ABI explicitly states it is
2321 // undefined, and it doesn't work in practice because of how the ABI
2322 // defines varargs anyway.
2323 if (fnType->getCallConv() == CC_C) {
2324 bool HasAVXType = false;
2325 for (CallArgList::const_iterator
2326 it = args.begin(), ie = args.end(); it != ie; ++it) {
2327 if (getABIInfo().isPassedUsingAVXType(it->Ty)) {
2328 HasAVXType = true;
2329 break;
2330 }
2331 }
2332
2333 if (!HasAVXType)
2334 return true;
2335 }
2336
2337 return TargetCodeGenInfo::isNoProtoCallVariadic(args, fnType);
2338 }
2339
2340 llvm::Constant *
2341 getUBSanFunctionSignature(CodeGen::CodeGenModule &CGM) const override {
2342 unsigned Sig = (0xeb << 0) | // jmp rel8
2343 (0x06 << 8) | // .+0x08
2344 ('v' << 16) |
2345 ('2' << 24);
2346 return llvm::ConstantInt::get(CGM.Int32Ty, Sig);
2347 }
2348
2349 void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
2350 CodeGen::CodeGenModule &CGM) const override {
2351 if (GV->isDeclaration())
2352 return;
2353 if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D)) {
2354 if (FD->hasAttr<X86ForceAlignArgPointerAttr>()) {
2355 llvm::Function *Fn = cast<llvm::Function>(GV);
2356 Fn->addFnAttr("stackrealign");
2357 }
2358 if (FD->hasAttr<AnyX86InterruptAttr>()) {
2359 llvm::Function *Fn = cast<llvm::Function>(GV);
2360 Fn->setCallingConv(llvm::CallingConv::X86_INTR);
2361 }
2362 }
2363 }
2364 };
2365
2366 static std::string qualifyWindowsLibrary(llvm::StringRef Lib) {
2367 // If the argument does not end in .lib, automatically add the suffix.
2368 // If the argument contains a space, enclose it in quotes.
2369 // This matches the behavior of MSVC.
2370 bool Quote = (Lib.find(" ") != StringRef::npos);
2371 std::string ArgStr = Quote ? "\"" : "";
2372 ArgStr += Lib;
2373 if (!Lib.endswith_lower(".lib") && !Lib.endswith_lower(".a"))
2374 ArgStr += ".lib";
2375 ArgStr += Quote ? "\"" : "";
2376 return ArgStr;
2377 }
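// E.g. "msvcrt" becomes "msvcrt.lib" and "my lib" becomes "\"my lib.lib\"",
// while names already ending in .lib or .a are passed through unsuffixed.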
2378
2379 class WinX86_32TargetCodeGenInfo : public X86_32TargetCodeGenInfo {
2380 public:
2381 WinX86_32TargetCodeGenInfo(CodeGen::CodeGenTypes &CGT,
2382 bool DarwinVectorABI, bool RetSmallStructInRegABI, bool Win32StructABI,
2383 unsigned NumRegisterParameters)
2384 : X86_32TargetCodeGenInfo(CGT, DarwinVectorABI, RetSmallStructInRegABI,
2385 Win32StructABI, NumRegisterParameters, false) {}
2386
2387 void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
2388 CodeGen::CodeGenModule &CGM) const override;
2389
2390 void getDependentLibraryOption(llvm::StringRef Lib,
2391 llvm::SmallString<24> &Opt) const override {
2392 Opt = "/DEFAULTLIB:";
2393 Opt += qualifyWindowsLibrary(Lib);
2394 }
2395
2396 void getDetectMismatchOption(llvm::StringRef Name,
2397 llvm::StringRef Value,
2398 llvm::SmallString<32> &Opt) const override {
2399 Opt = "/FAILIFMISMATCH:\"" + Name.str() + "=" + Value.str() + "\"";
2400 }
2401 };
2402
2403 static void addStackProbeTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
2404 CodeGen::CodeGenModule &CGM) {
2405 if (llvm::Function *Fn = dyn_cast_or_null<llvm::Function>(GV)) {
2406
2407 if (CGM.getCodeGenOpts().StackProbeSize != 4096)
2408 Fn->addFnAttr("stack-probe-size",
2409 llvm::utostr(CGM.getCodeGenOpts().StackProbeSize));
2410 if (CGM.getCodeGenOpts().NoStackArgProbe)
2411 Fn->addFnAttr("no-stack-arg-probe");
2412 }
2413 }
2414
2415 void WinX86_32TargetCodeGenInfo::setTargetAttributes(
2416 const Decl *D, llvm::GlobalValue *GV, CodeGen::CodeGenModule &CGM) const {
2417 X86_32TargetCodeGenInfo::setTargetAttributes(D, GV, CGM);
2418 if (GV->isDeclaration())
2419 return;
2420 addStackProbeTargetAttributes(D, GV, CGM);
2421 }
2422
2423 class WinX86_64TargetCodeGenInfo : public TargetCodeGenInfo {
2424 public:
2425 WinX86_64TargetCodeGenInfo(CodeGen::CodeGenTypes &CGT,
2426 X86AVXABILevel AVXLevel)
2427 : TargetCodeGenInfo(new WinX86_64ABIInfo(CGT, AVXLevel)) {}
2428
2429 void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
2430 CodeGen::CodeGenModule &CGM) const override;
2431
2432 int getDwarfEHStackPointer(CodeGen::CodeGenModule &CGM) const override {
2433 return 7;
2434 }
2435
2436 bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
2437 llvm::Value *Address) const override {
2438 llvm::Value *Eight8 = llvm::ConstantInt::get(CGF.Int8Ty, 8);
2439
2440 // 0-15 are the 16 integer registers.
2441 // 16 is %rip.
2442 AssignToArrayRange(CGF.Builder, Address, Eight8, 0, 16);
2443 return false;
2444 }
2445
2446 void getDependentLibraryOption(llvm::StringRef Lib,
2447 llvm::SmallString<24> &Opt) const override {
2448 Opt = "/DEFAULTLIB:";
2449 Opt += qualifyWindowsLibrary(Lib);
2450 }
2451
2452 void getDetectMismatchOption(llvm::StringRef Name,
2453 llvm::StringRef Value,
2454 llvm::SmallString<32> &Opt) const override {
2455 Opt = "/FAILIFMISMATCH:\"" + Name.str() + "=" + Value.str() + "\"";
2456 }
2457 };
2458
2459 void WinX86_64TargetCodeGenInfo::setTargetAttributes(
2460 const Decl *D, llvm::GlobalValue *GV, CodeGen::CodeGenModule &CGM) const {
2461 TargetCodeGenInfo::setTargetAttributes(D, GV, CGM);
2462 if (GV->isDeclaration())
2463 return;
2464 if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D)) {
2465 if (FD->hasAttr<X86ForceAlignArgPointerAttr>()) {
2466 llvm::Function *Fn = cast<llvm::Function>(GV);
2467 Fn->addFnAttr("stackrealign");
2468 }
2469 if (FD->hasAttr<AnyX86InterruptAttr>()) {
2470 llvm::Function *Fn = cast<llvm::Function>(GV);
2471 Fn->setCallingConv(llvm::CallingConv::X86_INTR);
2472 }
2473 }
2474
2475 addStackProbeTargetAttributes(D, GV, CGM);
2476 }
2477 }
2478
2479 void X86_64ABIInfo::postMerge(unsigned AggregateSize, Class &Lo,
2480 Class &Hi) const {
2481 // AMD64-ABI 3.2.3p2: Rule 5. Then a post merger cleanup is done:
2482 //
2483 // (a) If one of the classes is Memory, the whole argument is passed in
2484 // memory.
2485 //
2486 // (b) If X87UP is not preceded by X87, the whole argument is passed in
2487 // memory.
2488 //
2489 // (c) If the size of the aggregate exceeds two eightbytes and the first
2490 // eightbyte isn't SSE or any other eightbyte isn't SSEUP, the whole
2491 // argument is passed in memory. NOTE: This is necessary to keep the
2492 // ABI working for processors that don't support the __m256 type.
2493 //
2494 // (d) If SSEUP is not preceded by SSE or SSEUP, it is converted to SSE.
2495 //
2496 // Some of these are enforced by the merging logic. Others can arise
2497 // only with unions; for example:
2498 // union { _Complex double; unsigned; }
2499 //
2500 // Note that clauses (b) and (c) were added in 0.98.
2501 //
2502 if (Hi == Memory)
2503 Lo = Memory;
2504 if (Hi == X87Up && Lo != X87 && honorsRevision0_98())
2505 Lo = Memory;
2506 if (AggregateSize > 128 && (Lo != SSE || Hi != SSEUp))
2507 Lo = Memory;
2508 if (Hi == SSEUp && Lo != SSE)
2509 Hi = SSE;
2510 }
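// For instance (assuming the x87 80-bit long double), classifying
// union { long double ld; unsigned u; } merges the eightbytes to
// (INTEGER, X87UP); clause (b) then demotes the whole union to MEMORY on
// targets that honor the 0.98 revision.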
2511
2512 X86_64ABIInfo::Class X86_64ABIInfo::merge(Class Accum, Class Field) {
2513 // AMD64-ABI 3.2.3p2: Rule 4. Each field of an object is
2514 // classified recursively so that always two fields are
2515 // considered. The resulting class is calculated according to
2516 // the classes of the fields in the eightbyte:
2517 //
2518 // (a) If both classes are equal, this is the resulting class.
2519 //
2520 // (b) If one of the classes is NO_CLASS, the resulting class is
2521 // the other class.
2522 //
2523 // (c) If one of the classes is MEMORY, the result is the MEMORY
2524 // class.
2525 //
2526 // (d) If one of the classes is INTEGER, the result is the
2527 // INTEGER.
2528 //
2529 // (e) If one of the classes is X87, X87UP, COMPLEX_X87 class,
2530 // MEMORY is used as class.
2531 //
2532 // (f) Otherwise class SSE is used.
2533
2534 // Accum should never be memory (we should have returned) or
2535 // ComplexX87 (because this cannot be passed in a structure).
2536 assert((Accum != Memory && Accum != ComplexX87) &&
2537 "Invalid accumulated classification during merge.");
2538 if (Accum == Field || Field == NoClass)
2539 return Accum;
2540 if (Field == Memory)
2541 return Memory;
2542 if (Accum == NoClass)
2543 return Field;
2544 if (Accum == Integer || Field == Integer)
2545 return Integer;
2546 if (Field == X87 || Field == X87Up || Field == ComplexX87 ||
2547 Accum == X87 || Accum == X87Up)
2548 return Memory;
2549 return SSE;
2550 }
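// Worked example: in 'struct { int i; float f; }' both fields land in one
// eightbyte, and merge(INTEGER, SSE) yields INTEGER, so the pair is passed
// together in a single general-purpose register.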
2551
2552 void X86_64ABIInfo::classify(QualType Ty, uint64_t OffsetBase,
2553 Class &Lo, Class &Hi, bool isNamedArg) const {
2554 // FIXME: This code can be simplified by introducing a simple value class for
2555 // Class pairs with appropriate constructor methods for the various
2556 // situations.
2557
2558 // FIXME: Some of the split computations are wrong; unaligned vectors
2559 // shouldn't be passed in registers for example, so there is no chance they
2560 // can straddle an eightbyte. Verify & simplify.
2561
2562 Lo = Hi = NoClass;
2563
2564 Class &Current = OffsetBase < 64 ? Lo : Hi;
2565 Current = Memory;
2566
2567 if (const BuiltinType *BT = Ty->getAs<BuiltinType>()) {
2568 BuiltinType::Kind k = BT->getKind();
2569
2570 if (k == BuiltinType::Void) {
2571 Current = NoClass;
2572 } else if (k == BuiltinType::Int128 || k == BuiltinType::UInt128) {
2573 Lo = Integer;
2574 Hi = Integer;
2575 } else if (k >= BuiltinType::Bool && k <= BuiltinType::LongLong) {
2576 Current = Integer;
2577 } else if (k == BuiltinType::Float || k == BuiltinType::Double) {
2578 Current = SSE;
2579 } else if (k == BuiltinType::LongDouble) {
2580 const llvm::fltSemantics *LDF = &getTarget().getLongDoubleFormat();
2581 if (LDF == &llvm::APFloat::IEEEquad()) {
2582 Lo = SSE;
2583 Hi = SSEUp;
2584 } else if (LDF == &llvm::APFloat::x87DoubleExtended()) {
2585 Lo = X87;
2586 Hi = X87Up;
2587 } else if (LDF == &llvm::APFloat::IEEEdouble()) {
2588 Current = SSE;
2589 } else
2590 llvm_unreachable("unexpected long double representation!");
2591 }
2592 // FIXME: _Decimal32 and _Decimal64 are SSE.
2593 // FIXME: _float128 and _Decimal128 are (SSE, SSEUp).
2594 return;
2595 }
2596
2597 if (const EnumType *ET = Ty->getAs<EnumType>()) {
2598 // Classify the underlying integer type.
2599 classify(ET->getDecl()->getIntegerType(), OffsetBase, Lo, Hi, isNamedArg);
2600 return;
2601 }
2602
2603 if (Ty->hasPointerRepresentation()) {
2604 Current = Integer;
2605 return;
2606 }
2607
2608 if (Ty->isMemberPointerType()) {
2609 if (Ty->isMemberFunctionPointerType()) {
2610 if (Has64BitPointers) {
2611 // If Has64BitPointers, this is an {i64, i64}, so classify both
2612 // Lo and Hi now.
2613 Lo = Hi = Integer;
2614 } else {
2615 // Otherwise, with 32-bit pointers, this is an {i32, i32}. If that
2616 // straddles an eightbyte boundary, Hi should be classified as well.
2617 uint64_t EB_FuncPtr = (OffsetBase) / 64;
2618 uint64_t EB_ThisAdj = (OffsetBase + 64 - 1) / 64;
2619 if (EB_FuncPtr != EB_ThisAdj) {
2620 Lo = Hi = Integer;
2621 } else {
2622 Current = Integer;
2623 }
2624 }
2625 } else {
2626 Current = Integer;
2627 }
2628 return;
2629 }
2630
2631 if (const VectorType *VT = Ty->getAs<VectorType>()) {
2632 uint64_t Size = getContext().getTypeSize(VT);
2633 if (Size == 1 || Size == 8 || Size == 16 || Size == 32) {
2634 // gcc passes the following as integer:
2635 // 4 bytes - <4 x char>, <2 x short>, <1 x int>, <1 x float>
2636 // 2 bytes - <2 x char>, <1 x short>
2637 // 1 byte - <1 x char>
2638 Current = Integer;
2639
2640 // If this type crosses an eightbyte boundary, it should be
2641 // split.
2642 uint64_t EB_Lo = (OffsetBase) / 64;
2643 uint64_t EB_Hi = (OffsetBase + Size - 1) / 64;
2644 if (EB_Lo != EB_Hi)
2645 Hi = Lo;
2646 } else if (Size == 64) {
2647 QualType ElementType = VT->getElementType();
2648
2649 // gcc passes <1 x double> in memory. :(
2650 if (ElementType->isSpecificBuiltinType(BuiltinType::Double))
2651 return;
2652
2653 // gcc passes <1 x long long> as SSE but clang used to unconditionally
2654 // pass them as integer. For platforms where clang is the de facto
2655 // platform compiler, we must continue to use integer.
2656 if (!classifyIntegerMMXAsSSE() &&
2657 (ElementType->isSpecificBuiltinType(BuiltinType::LongLong) ||
2658 ElementType->isSpecificBuiltinType(BuiltinType::ULongLong) ||
2659 ElementType->isSpecificBuiltinType(BuiltinType::Long) ||
2660 ElementType->isSpecificBuiltinType(BuiltinType::ULong)))
2661 Current = Integer;
2662 else
2663 Current = SSE;
2664
2665 // If this type crosses an eightbyte boundary, it should be
2666 // split.
2667 if (OffsetBase && OffsetBase != 64)
2668 Hi = Lo;
2669 } else if (Size == 128 ||
2670 (isNamedArg && Size <= getNativeVectorSizeForAVXABI(AVXLevel))) {
2671 QualType ElementType = VT->getElementType();
2672
2673 // gcc passes 256 and 512 bit <X x __int128> vectors in memory. :(
2674 if (passInt128VectorsInMem() && Size != 128 &&
2675 (ElementType->isSpecificBuiltinType(BuiltinType::Int128) ||
2676 ElementType->isSpecificBuiltinType(BuiltinType::UInt128)))
2677 return;
2678
2679 // Arguments of 256-bits are split into four eightbyte chunks. The
2680 // least significant one belongs to class SSE and all the others to class
2681 // SSEUP. The original Lo and Hi design considers that types can't be
2682 // greater than 128-bits, so a 64-bit split in Hi and Lo makes sense.
2683 // This design isn't correct for 256-bits, but since there are no cases
2684 // where the upper parts would need to be inspected, avoid adding
2685 // complexity and just consider Hi to match the 64-256 part.
2686 //
2687 // Note that per 3.5.7 of AMD64-ABI, 256-bit args are only passed in
2688 // registers if they are "named", i.e. not part of the "..." of a
2689 // variadic function.
2690 //
2691 // Similarly, per 3.2.3. of the AVX512 draft, 512-bits ("named") args are
2692 // split into eight eightbyte chunks, one SSE and seven SSEUP.
2693 Lo = SSE;
2694 Hi = SSEUp;
2695 }
2696 return;
2697 }
2698
2699 if (const ComplexType *CT = Ty->getAs<ComplexType>()) {
2700 QualType ET = getContext().getCanonicalType(CT->getElementType());
2701
2702 uint64_t Size = getContext().getTypeSize(Ty);
2703 if (ET->isIntegralOrEnumerationType()) {
2704 if (Size <= 64)
2705 Current = Integer;
2706 else if (Size <= 128)
2707 Lo = Hi = Integer;
2708 } else if (ET == getContext().FloatTy) {
2709 Current = SSE;
2710 } else if (ET == getContext().DoubleTy) {
2711 Lo = Hi = SSE;
2712 } else if (ET == getContext().LongDoubleTy) {
2713 const llvm::fltSemantics *LDF = &getTarget().getLongDoubleFormat();
2714 if (LDF == &llvm::APFloat::IEEEquad())
2715 Current = Memory;
2716 else if (LDF == &llvm::APFloat::x87DoubleExtended())
2717 Current = ComplexX87;
2718 else if (LDF == &llvm::APFloat::IEEEdouble())
2719 Lo = Hi = SSE;
2720 else
2721 llvm_unreachable("unexpected long double representation!");
2722 }
2723
2724 // If this complex type crosses an eightbyte boundary then it
2725 // should be split.
2726 uint64_t EB_Real = (OffsetBase) / 64;
2727 uint64_t EB_Imag = (OffsetBase + getContext().getTypeSize(ET)) / 64;
2728 if (Hi == NoClass && EB_Real != EB_Imag)
2729 Hi = Lo;
2730
2731 return;
2732 }
2733
2734 if (const ConstantArrayType *AT = getContext().getAsConstantArrayType(Ty)) {
2735 // Arrays are treated like structures.
2736
2737 uint64_t Size = getContext().getTypeSize(Ty);
2738
2739 // AMD64-ABI 3.2.3p2: Rule 1. If the size of an object is larger
2740 // than eight eightbytes, ..., it has class MEMORY.
2741 if (Size > 512)
2742 return;
2743
2744 // AMD64-ABI 3.2.3p2: Rule 1. If ..., or it contains unaligned
2745 // fields, it has class MEMORY.
2746 //
2747 // Only need to check alignment of array base.
2748 if (OffsetBase % getContext().getTypeAlign(AT->getElementType()))
2749 return;
2750
2751 // Otherwise implement simplified merge. We could be smarter about
2752 // this, but it isn't worth it and would be harder to verify.
2753 Current = NoClass;
2754 uint64_t EltSize = getContext().getTypeSize(AT->getElementType());
2755 uint64_t ArraySize = AT->getSize().getZExtValue();
2756
2757 // The only case a 256-bit wide vector could be used is when the array
2758 // contains a single 256-bit element. Since Lo and Hi logic isn't extended
2759 // to work for sizes wider than 128, early check and fallback to memory.
2760 //
2761 if (Size > 128 &&
2762 (Size != EltSize || Size > getNativeVectorSizeForAVXABI(AVXLevel)))
2763 return;
2764
2765 for (uint64_t i=0, Offset=OffsetBase; i<ArraySize; ++i, Offset += EltSize) {
2766 Class FieldLo, FieldHi;
2767 classify(AT->getElementType(), Offset, FieldLo, FieldHi, isNamedArg);
2768 Lo = merge(Lo, FieldLo);
2769 Hi = merge(Hi, FieldHi);
2770 if (Lo == Memory || Hi == Memory)
2771 break;
2772 }
2773
2774 postMerge(Size, Lo, Hi);
2775 assert((Hi != SSEUp || Lo == SSE) && "Invalid SSEUp array classification.");
2776 return;
2777 }

  if (const RecordType *RT = Ty->getAs<RecordType>()) {
    uint64_t Size = getContext().getTypeSize(Ty);

    // AMD64-ABI 3.2.3p2: Rule 1. If the size of an object is larger
    // than eight eightbytes, ..., it has class MEMORY.
    if (Size > 512)
      return;

    // AMD64-ABI 3.2.3p2: Rule 2. If a C++ object has either a non-trivial
    // copy constructor or a non-trivial destructor, it is passed by invisible
    // reference.
    if (getRecordArgABI(RT, getCXXABI()))
      return;

    const RecordDecl *RD = RT->getDecl();

    // Assume variable sized types are passed in memory.
    if (RD->hasFlexibleArrayMember())
      return;

    const ASTRecordLayout &Layout = getContext().getASTRecordLayout(RD);

    // Reset Lo class, this will be recomputed.
    Current = NoClass;

    // If this is a C++ record, classify the bases first.
    if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD)) {
      for (const auto &I : CXXRD->bases()) {
        assert(!I.isVirtual() && !I.getType()->isDependentType() &&
               "Unexpected base class!");
        const auto *Base =
            cast<CXXRecordDecl>(I.getType()->castAs<RecordType>()->getDecl());

        // Classify this field.
        //
        // AMD64-ABI 3.2.3p2: Rule 3. If the size of the aggregate exceeds a
        // single eightbyte, each is classified separately. Each eightbyte gets
        // initialized to class NO_CLASS.
        Class FieldLo, FieldHi;
        uint64_t Offset =
            OffsetBase + getContext().toBits(Layout.getBaseClassOffset(Base));
        classify(I.getType(), Offset, FieldLo, FieldHi, isNamedArg);
        Lo = merge(Lo, FieldLo);
        Hi = merge(Hi, FieldHi);
        if (Lo == Memory || Hi == Memory) {
          postMerge(Size, Lo, Hi);
          return;
        }
      }
    }

    // Classify the fields one at a time, merging the results.
    unsigned idx = 0;
    for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
         i != e; ++i, ++idx) {
      uint64_t Offset = OffsetBase + Layout.getFieldOffset(idx);
      bool BitField = i->isBitField();

      // Ignore padding bit-fields.
      if (BitField && i->isUnnamedBitfield())
        continue;

      // AMD64-ABI 3.2.3p2: Rule 1. If the size of an object is larger than
      // eight eightbytes, or it contains unaligned fields, it has class
      // MEMORY.
      //
      // The only case a 256-bit wide vector could be used is when the struct
      // contains a single 256-bit element. Since Lo and Hi logic isn't
      // extended to work for sizes wider than 128, early check and fallback
      // to memory.
      //
      if (Size > 128 && (Size != getContext().getTypeSize(i->getType()) ||
                         Size > getNativeVectorSizeForAVXABI(AVXLevel))) {
        Lo = Memory;
        postMerge(Size, Lo, Hi);
        return;
      }
      // Note, skip this test for bit-fields, see below.
      if (!BitField && Offset % getContext().getTypeAlign(i->getType())) {
        Lo = Memory;
        postMerge(Size, Lo, Hi);
        return;
      }

      // Classify this field.
      //
      // AMD64-ABI 3.2.3p2: Rule 3. If the size of the aggregate
      // exceeds a single eightbyte, each is classified
      // separately. Each eightbyte gets initialized to class
      // NO_CLASS.
      Class FieldLo, FieldHi;

      // Bit-fields require special handling, they do not force the
      // structure to be passed in memory even if unaligned, and
      // therefore they can straddle an eightbyte.
      if (BitField) {
        assert(!i->isUnnamedBitfield());
        uint64_t Offset = OffsetBase + Layout.getFieldOffset(idx);
        uint64_t Size = i->getBitWidthValue(getContext());

        uint64_t EB_Lo = Offset / 64;
        uint64_t EB_Hi = (Offset + Size - 1) / 64;

        if (EB_Lo) {
          assert(EB_Hi == EB_Lo && "Invalid classification, type > 16 bytes.");
          FieldLo = NoClass;
          FieldHi = Integer;
        } else {
          FieldLo = Integer;
          FieldHi = EB_Hi ? Integer : NoClass;
        }
      } else
        classify(i->getType(), Offset, FieldLo, FieldHi, isNamedArg);
      Lo = merge(Lo, FieldLo);
      Hi = merge(Hi, FieldHi);
      if (Lo == Memory || Hi == Memory)
        break;
    }

    postMerge(Size, Lo, Hi);
  }
}
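
// Illustrative example (hypothetical type, not from this file): under the
// rules above, a 16-byte struct such as
//   struct S { double d; long l; };
// classifies as Lo = SSE (the eightbyte holding 'd') and Hi = Integer (the
// eightbyte holding 'l'), so it can travel in one XMM register plus one GPR.
// A 24-byte struct of three longs, by contrast, fails the Size checks above
// and ends up with class MEMORY, i.e. it is passed on the stack.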

ABIArgInfo X86_64ABIInfo::getIndirectReturnResult(QualType Ty) const {
  // If this is a scalar LLVM value then assume LLVM will pass it in the right
  // place naturally.
  if (!isAggregateTypeForABI(Ty)) {
    // Treat an enum type as its underlying type.
    if (const EnumType *EnumTy = Ty->getAs<EnumType>())
      Ty = EnumTy->getDecl()->getIntegerType();

    return (Ty->isPromotableIntegerType() ? ABIArgInfo::getExtend(Ty)
                                          : ABIArgInfo::getDirect());
  }

  return getNaturalAlignIndirect(Ty);
}

bool X86_64ABIInfo::IsIllegalVectorType(QualType Ty) const {
  if (const VectorType *VecTy = Ty->getAs<VectorType>()) {
    uint64_t Size = getContext().getTypeSize(VecTy);
    unsigned LargestVector = getNativeVectorSizeForAVXABI(AVXLevel);
    if (Size <= 64 || Size > LargestVector)
      return true;
    QualType EltTy = VecTy->getElementType();
    if (passInt128VectorsInMem() &&
        (EltTy->isSpecificBuiltinType(BuiltinType::Int128) ||
         EltTy->isSpecificBuiltinType(BuiltinType::UInt128)))
      return true;
  }

  return false;
}

ABIArgInfo X86_64ABIInfo::getIndirectResult(QualType Ty,
                                            unsigned freeIntRegs) const {
  // If this is a scalar LLVM value then assume LLVM will pass it in the right
  // place naturally.
  //
  // This assumption is optimistic, as there could be free registers available
  // when we need to pass this argument in memory, and LLVM could try to pass
  // the argument in the free register. This does not seem to happen currently,
  // but this code would be much safer if we could mark the argument with
  // 'onstack'. See PR12193.
  if (!isAggregateTypeForABI(Ty) && !IsIllegalVectorType(Ty)) {
    // Treat an enum type as its underlying type.
    if (const EnumType *EnumTy = Ty->getAs<EnumType>())
      Ty = EnumTy->getDecl()->getIntegerType();

    return (Ty->isPromotableIntegerType() ? ABIArgInfo::getExtend(Ty)
                                          : ABIArgInfo::getDirect());
  }

  if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, getCXXABI()))
    return getNaturalAlignIndirect(Ty, RAA == CGCXXABI::RAA_DirectInMemory);

  // Compute the byval alignment. We specify the alignment of the byval in all
  // cases so that the mid-level optimizer knows the alignment of the byval.
  unsigned Align = std::max(getContext().getTypeAlign(Ty) / 8, 8U);

  // Attempt to avoid passing indirect results using byval when possible. This
  // is important for good codegen.
  //
  // We do this by coercing the value into a scalar type which the backend can
  // handle naturally (i.e., without using byval).
  //
  // For simplicity, we currently only do this when we have exhausted all of
  // the free integer registers. Doing this when there are free integer
  // registers would require more care, as we would have to ensure that the
  // coerced value did not claim the unused register. That would require
  // either reordering the arguments to the function (so that any subsequent
  // inreg values came first), or only doing this optimization when there were
  // no following arguments that might be inreg.
  //
  // We currently expect it to be rare (particularly in well written code) for
  // arguments to be passed on the stack when there are still free integer
  // registers available (this would typically imply large structs being
  // passed by value), so this seems like a fair tradeoff for now.
  //
  // We can revisit this if the backend grows support for 'onstack' parameter
  // attributes. See PR12193.
  if (freeIntRegs == 0) {
    uint64_t Size = getContext().getTypeSize(Ty);

    // If this type fits in an eightbyte, coerce it into the matching integral
    // type, which will end up on the stack (with alignment 8).
    if (Align == 8 && Size <= 64)
      return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(),
                                                          Size));
  }

  return ABIArgInfo::getIndirect(CharUnits::fromQuantity(Align));
}

/// The ABI specifies that a value should be passed in a full vector XMM/YMM
/// register. Pick an LLVM IR type that will be passed as a vector register.
llvm::Type *X86_64ABIInfo::GetByteVectorType(QualType Ty) const {
  // Wrapper structs/arrays that only contain vectors are passed just like
  // vectors; strip them off if present.
  if (const Type *InnerTy = isSingleElementStruct(Ty, getContext()))
    Ty = QualType(InnerTy, 0);

  llvm::Type *IRType = CGT.ConvertType(Ty);
  if (isa<llvm::VectorType>(IRType)) {
    // Don't pass vXi128 vectors in their native type, the backend can't
    // legalize them.
    if (passInt128VectorsInMem() &&
        IRType->getVectorElementType()->isIntegerTy(128)) {
      // Use a vXi64 vector.
      uint64_t Size = getContext().getTypeSize(Ty);
      return llvm::VectorType::get(llvm::Type::getInt64Ty(getVMContext()),
                                   Size / 64);
    }

    return IRType;
  }

  if (IRType->getTypeID() == llvm::Type::FP128TyID)
    return IRType;

  // We couldn't find the preferred IR vector type for 'Ty'.
  uint64_t Size = getContext().getTypeSize(Ty);
  assert((Size == 128 || Size == 256 || Size == 512) && "Invalid type found!");

  // Return a LLVM IR vector type based on the size of 'Ty'.
  return llvm::VectorType::get(llvm::Type::getDoubleTy(getVMContext()),
                               Size / 64);
}

/// BitsContainNoUserData - Return true if the specified [start,end) bit range
/// is known to either be off the end of the specified type or be in alignment
/// padding. The user type specified is known to be at most 128 bits in size,
/// and to have passed through X86_64ABIInfo::classify with a successful
/// classification that put one of the two halves in the INTEGER class.
///
/// It is conservatively correct to return false.
static bool BitsContainNoUserData(QualType Ty, unsigned StartBit,
                                  unsigned EndBit, ASTContext &Context) {
  // If the bytes being queried are off the end of the type, there is no user
  // data hiding here. This handles analysis of builtins, vectors and other
  // types that don't contain interesting padding.
  unsigned TySize = (unsigned)Context.getTypeSize(Ty);
  if (TySize <= StartBit)
    return true;

  if (const ConstantArrayType *AT = Context.getAsConstantArrayType(Ty)) {
    unsigned EltSize = (unsigned)Context.getTypeSize(AT->getElementType());
    unsigned NumElts = (unsigned)AT->getSize().getZExtValue();

    // Check each element to see if the element overlaps with the queried
    // range.
    for (unsigned i = 0; i != NumElts; ++i) {
      // If the element is after the span we care about, then we're done.
      unsigned EltOffset = i*EltSize;
      if (EltOffset >= EndBit) break;

      unsigned EltStart = EltOffset < StartBit ? StartBit-EltOffset : 0;
      if (!BitsContainNoUserData(AT->getElementType(), EltStart,
                                 EndBit-EltOffset, Context))
        return false;
    }
    // If it overlaps no elements, then it is safe to process as padding.
    return true;
  }

  if (const RecordType *RT = Ty->getAs<RecordType>()) {
    const RecordDecl *RD = RT->getDecl();
    const ASTRecordLayout &Layout = Context.getASTRecordLayout(RD);

    // If this is a C++ record, check the bases first.
    if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD)) {
      for (const auto &I : CXXRD->bases()) {
        assert(!I.isVirtual() && !I.getType()->isDependentType() &&
               "Unexpected base class!");
        const auto *Base =
            cast<CXXRecordDecl>(I.getType()->castAs<RecordType>()->getDecl());

        // If the base is after the span we care about, ignore it.
        unsigned BaseOffset = Context.toBits(Layout.getBaseClassOffset(Base));
        if (BaseOffset >= EndBit) continue;

        unsigned BaseStart = BaseOffset < StartBit ? StartBit-BaseOffset : 0;
        if (!BitsContainNoUserData(I.getType(), BaseStart,
                                   EndBit-BaseOffset, Context))
          return false;
      }
    }

    // Verify that no field has data that overlaps the region of interest.
    // Yes, this could be sped up a lot by being smarter about queried fields,
    // however we're only looking at structs up to 16 bytes, so we don't care
    // much.
    unsigned idx = 0;
    for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
         i != e; ++i, ++idx) {
      unsigned FieldOffset = (unsigned)Layout.getFieldOffset(idx);

      // If we found a field after the region we care about, then we're done.
      if (FieldOffset >= EndBit) break;

      unsigned FieldStart = FieldOffset < StartBit ? StartBit-FieldOffset : 0;
      if (!BitsContainNoUserData(i->getType(), FieldStart, EndBit-FieldOffset,
                                 Context))
        return false;
    }

    // If nothing in this record overlapped the area of interest, then we're
    // clean.
    return true;
  }

  return false;
}
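
// Illustrative example (hypothetical type): for
//   struct S { double d; int i; };   // 16 bytes, 4 bytes of tail padding
// BitsContainNoUserData(S, 96, 128, Ctx) returns true, since bits [96,128)
// fall entirely within the padding after 'i'; this is what later lets the
// high eightbyte be represented as i32 instead of i64.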

/// ContainsFloatAtOffset - Return true if the specified LLVM IR type has a
/// float member at the specified offset. For example, {int,{float}} has a
/// float at offset 4. It is conservatively correct for this routine to return
/// false.
static bool ContainsFloatAtOffset(llvm::Type *IRType, unsigned IROffset,
                                  const llvm::DataLayout &TD) {
  // Base case if we find a float.
  if (IROffset == 0 && IRType->isFloatTy())
    return true;

  // If this is a struct, recurse into the field at the specified offset.
  if (llvm::StructType *STy = dyn_cast<llvm::StructType>(IRType)) {
    const llvm::StructLayout *SL = TD.getStructLayout(STy);
    unsigned Elt = SL->getElementContainingOffset(IROffset);
    IROffset -= SL->getElementOffset(Elt);
    return ContainsFloatAtOffset(STy->getElementType(Elt), IROffset, TD);
  }

  // If this is an array, recurse into the element at the specified offset.
  if (llvm::ArrayType *ATy = dyn_cast<llvm::ArrayType>(IRType)) {
    llvm::Type *EltTy = ATy->getElementType();
    unsigned EltSize = TD.getTypeAllocSize(EltTy);
    IROffset -= IROffset/EltSize*EltSize;
    return ContainsFloatAtOffset(EltTy, IROffset, TD);
  }

  return false;
}

/// GetSSETypeAtOffset - Return a type that will be passed by the backend in
/// the low 8 bytes of an XMM register, corresponding to the SSE class.
llvm::Type *X86_64ABIInfo::
GetSSETypeAtOffset(llvm::Type *IRType, unsigned IROffset,
                   QualType SourceTy, unsigned SourceOffset) const {
  // The only three choices we have are double, <2 x float>, or float. We pass
  // as float if the last 4 bytes are just padding. This happens for structs
  // that contain 3 floats.
  if (BitsContainNoUserData(SourceTy, SourceOffset*8+32,
                            SourceOffset*8+64, getContext()))
    return llvm::Type::getFloatTy(getVMContext());

  // We want to pass as <2 x float> if the LLVM IR type contains a float at
  // offset+0 and offset+4. Walk the LLVM IR type to find out if this is the
  // case.
  if (ContainsFloatAtOffset(IRType, IROffset, getDataLayout()) &&
      ContainsFloatAtOffset(IRType, IROffset+4, getDataLayout()))
    return llvm::VectorType::get(llvm::Type::getFloatTy(getVMContext()), 2);

  return llvm::Type::getDoubleTy(getVMContext());
}
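
// Illustrative example (hypothetical type): struct { float x, y, z; } is 12
// bytes. For the low eightbyte, floats live at offsets 0 and 4, so the chosen
// type is <2 x float>; for the high eightbyte, bits [96,128) are off the end
// of the 12-byte type, so BitsContainNoUserData() lets it shrink to a single
// float, and the struct is passed as { <2 x float>, float }.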

/// GetINTEGERTypeAtOffset - The ABI specifies that a value should be passed
/// in an 8-byte GPR. This means that we either have a scalar or we are
/// talking about the high or low part of an up-to-16-byte struct. This
/// routine picks the best LLVM IR type to represent this, which may be i64
/// or may be anything else that the backend will pass in a GPR that works
/// better (e.g. i8, %foo*, etc).
///
/// IRType is an LLVM IR type that corresponds to (part of) the IR type for
/// the source type. IROffset is an offset in bytes into the LLVM IR type
/// that the 8-byte value references.
///
/// SourceTy is the source-level type for the entire argument. SourceOffset is
/// an offset into this that we're processing (which is always either 0 or 8).
///
llvm::Type *X86_64ABIInfo::
GetINTEGERTypeAtOffset(llvm::Type *IRType, unsigned IROffset,
                       QualType SourceTy, unsigned SourceOffset) const {
  // If we're dealing with an un-offset LLVM IR type, then it means that we're
  // returning an 8-byte unit starting with it. See if we can safely use it.
  if (IROffset == 0) {
    // Pointers and int64's always fill the 8-byte unit.
    if ((isa<llvm::PointerType>(IRType) && Has64BitPointers) ||
        IRType->isIntegerTy(64))
      return IRType;

    // If we have a 1/2/4-byte integer, we can use it only if the rest of the
    // goodness in the source type is just tail padding. This is allowed to
    // kick in for struct {double,int} on the int, but not on
    // struct{double,int,int} because we wouldn't return the second int. We
    // have to do this analysis on the source type because we can't depend on
    // unions being lowered a specific way etc.
    if (IRType->isIntegerTy(8) || IRType->isIntegerTy(16) ||
        IRType->isIntegerTy(32) ||
        (isa<llvm::PointerType>(IRType) && !Has64BitPointers)) {
      unsigned BitWidth = isa<llvm::PointerType>(IRType) ? 32 :
          cast<llvm::IntegerType>(IRType)->getBitWidth();

      if (BitsContainNoUserData(SourceTy, SourceOffset*8+BitWidth,
                                SourceOffset*8+64, getContext()))
        return IRType;
    }
  }

  if (llvm::StructType *STy = dyn_cast<llvm::StructType>(IRType)) {
    // If this is a struct, recurse into the field at the specified offset.
    const llvm::StructLayout *SL = getDataLayout().getStructLayout(STy);
    if (IROffset < SL->getSizeInBytes()) {
      unsigned FieldIdx = SL->getElementContainingOffset(IROffset);
      IROffset -= SL->getElementOffset(FieldIdx);

      return GetINTEGERTypeAtOffset(STy->getElementType(FieldIdx), IROffset,
                                    SourceTy, SourceOffset);
    }
  }

  if (llvm::ArrayType *ATy = dyn_cast<llvm::ArrayType>(IRType)) {
    llvm::Type *EltTy = ATy->getElementType();
    unsigned EltSize = getDataLayout().getTypeAllocSize(EltTy);
    unsigned EltOffset = IROffset/EltSize*EltSize;
    return GetINTEGERTypeAtOffset(EltTy, IROffset-EltOffset, SourceTy,
                                  SourceOffset);
  }

  // Okay, we don't have any better idea of what to pass, so we pass this in
  // an integer register that is no bigger than the remainder of the struct.
  unsigned TySizeInBytes =
      (unsigned)getContext().getTypeSizeInChars(SourceTy).getQuantity();

  assert(TySizeInBytes != SourceOffset && "Empty field?");

  // It is always safe to classify this as an integer type up to i64 that
  // isn't larger than the structure.
  return llvm::IntegerType::get(getVMContext(),
                                std::min(TySizeInBytes-SourceOffset, 8U)*8);
}
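
// Illustrative example (hypothetical types): for struct { double d; int i; }
// the eightbyte at offset 8 becomes i32, because BitsContainNoUserData()
// proves bits [96,128) are tail padding; for struct { double d; int i, j; }
// the same query yields i64, since returning just i32 would drop 'j'.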

/// GetX86_64ByValArgumentPair - Given a high and low type that can ideally
/// be used as elements of a two register pair to pass or return, return a
/// first class aggregate to represent them. For example, if the low part of
/// a by-value argument should be passed as i32* and the high part as float,
/// return {i32*, float}.
static llvm::Type *
GetX86_64ByValArgumentPair(llvm::Type *Lo, llvm::Type *Hi,
                           const llvm::DataLayout &TD) {
  // In order to correctly satisfy the ABI, we need the high part to start
  // at offset 8. If the high and low parts we inferred are both 4-byte types
  // (e.g. i32 and i32) then the resultant struct type ({i32,i32}) won't have
  // the second element at offset 8. Check for this:
  unsigned LoSize = (unsigned)TD.getTypeAllocSize(Lo);
  unsigned HiAlign = TD.getABITypeAlignment(Hi);
  unsigned HiStart = llvm::alignTo(LoSize, HiAlign);
  assert(HiStart != 0 && HiStart <= 8 && "Invalid x86-64 argument pair!");

  // To handle this, we have to increase the size of the low part so that the
  // second element will start at an 8 byte offset. We can't increase the size
  // of the second element because it might make us access off the end of the
  // struct.
  if (HiStart != 8) {
    // There are usually two sorts of types the ABI generation code can
    // produce for the low part of a pair that aren't 8 bytes in size: float
    // or i8/i16/i32. This can also include pointers when they are 32-bit (X32
    // and NaCl).
    // Promote these to a larger type.
    if (Lo->isFloatTy())
      Lo = llvm::Type::getDoubleTy(Lo->getContext());
    else {
      assert((Lo->isIntegerTy() || Lo->isPointerTy())
             && "Invalid/unknown lo type");
      Lo = llvm::Type::getInt64Ty(Lo->getContext());
    }
  }

  llvm::StructType *Result = llvm::StructType::get(Lo, Hi);

  // Verify that the second element is at an 8-byte offset.
  assert(TD.getStructLayout(Result)->getElementOffset(1) == 8 &&
         "Invalid x86-64 argument pair!");
  return Result;
}
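
// Illustrative example: if the inferred parts are Lo = i32 and Hi = i32, the
// naive pair {i32, i32} would place Hi at offset 4, so Lo is widened to i64
// and the result is {i64, i32}, which puts Hi at the required offset 8. Hi
// itself is never widened, because loading 8 bytes at offset 8 could read
// past the end of a 12-byte object.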

ABIArgInfo X86_64ABIInfo::
classifyReturnType(QualType RetTy) const {
  // AMD64-ABI 3.2.3p4: Rule 1. Classify the return type with the
  // classification algorithm.
  X86_64ABIInfo::Class Lo, Hi;
  classify(RetTy, 0, Lo, Hi, /*isNamedArg*/ true);

  // Check some invariants.
  assert((Hi != Memory || Lo == Memory) && "Invalid memory classification.");
  assert((Hi != SSEUp || Lo == SSE) && "Invalid SSEUp classification.");

  llvm::Type *ResType = nullptr;
  switch (Lo) {
  case NoClass:
    if (Hi == NoClass)
      return ABIArgInfo::getIgnore();
    // If the low part is just padding, it takes no register, leave ResType
    // null.
    assert((Hi == SSE || Hi == Integer || Hi == X87Up) &&
           "Unknown missing lo part");
    break;

  case SSEUp:
  case X87Up:
    llvm_unreachable("Invalid classification for lo word.");

  // AMD64-ABI 3.2.3p4: Rule 2. Types of class memory are returned via
  // hidden argument.
  case Memory:
    return getIndirectReturnResult(RetTy);

  // AMD64-ABI 3.2.3p4: Rule 3. If the class is INTEGER, the next
  // available register of the sequence %rax, %rdx is used.
  case Integer:
    ResType = GetINTEGERTypeAtOffset(CGT.ConvertType(RetTy), 0, RetTy, 0);

    // If we have a sign or zero extended integer, make sure to return Extend
    // so that the parameter gets the right LLVM IR attributes.
    if (Hi == NoClass && isa<llvm::IntegerType>(ResType)) {
      // Treat an enum type as its underlying type.
      if (const EnumType *EnumTy = RetTy->getAs<EnumType>())
        RetTy = EnumTy->getDecl()->getIntegerType();

      if (RetTy->isIntegralOrEnumerationType() &&
          RetTy->isPromotableIntegerType())
        return ABIArgInfo::getExtend(RetTy);
    }
    break;

  // AMD64-ABI 3.2.3p4: Rule 4. If the class is SSE, the next
  // available SSE register of the sequence %xmm0, %xmm1 is used.
  case SSE:
    ResType = GetSSETypeAtOffset(CGT.ConvertType(RetTy), 0, RetTy, 0);
    break;

  // AMD64-ABI 3.2.3p4: Rule 6. If the class is X87, the value is
  // returned on the X87 stack in %st0 as 80-bit x87 number.
  case X87:
    ResType = llvm::Type::getX86_FP80Ty(getVMContext());
    break;

  // AMD64-ABI 3.2.3p4: Rule 8. If the class is COMPLEX_X87, the real
  // part of the value is returned in %st0 and the imaginary part in
  // %st1.
  case ComplexX87:
    assert(Hi == ComplexX87 && "Unexpected ComplexX87 classification.");
    ResType = llvm::StructType::get(llvm::Type::getX86_FP80Ty(getVMContext()),
                                    llvm::Type::getX86_FP80Ty(getVMContext()));
    break;
  }

  llvm::Type *HighPart = nullptr;
  switch (Hi) {
  // Memory was handled previously and X87 should
  // never occur as a hi class.
  case Memory:
  case X87:
    llvm_unreachable("Invalid classification for hi word.");

  case ComplexX87: // Previously handled.
  case NoClass:
    break;

  case Integer:
    HighPart = GetINTEGERTypeAtOffset(CGT.ConvertType(RetTy), 8, RetTy, 8);
    if (Lo == NoClass) // Return HighPart at offset 8 in memory.
      return ABIArgInfo::getDirect(HighPart, 8);
    break;
  case SSE:
    HighPart = GetSSETypeAtOffset(CGT.ConvertType(RetTy), 8, RetTy, 8);
    if (Lo == NoClass) // Return HighPart at offset 8 in memory.
      return ABIArgInfo::getDirect(HighPart, 8);
    break;

  // AMD64-ABI 3.2.3p4: Rule 5. If the class is SSEUP, the eightbyte
  // is passed in the next available eightbyte chunk of the last used
  // vector register.
  //
  // SSEUP should always be preceded by SSE, just widen.
  case SSEUp:
    assert(Lo == SSE && "Unexpected SSEUp classification.");
    ResType = GetByteVectorType(RetTy);
    break;

  // AMD64-ABI 3.2.3p4: Rule 7. If the class is X87UP, the value is
  // returned together with the previous X87 value in %st0.
  case X87Up:
    // If X87Up is preceded by X87, we don't need to do
    // anything. However, in some cases with unions it may not be
    // preceded by X87. In such situations we follow gcc and pass the
    // extra bits in an SSE reg.
    if (Lo != X87) {
      HighPart = GetSSETypeAtOffset(CGT.ConvertType(RetTy), 8, RetTy, 8);
      if (Lo == NoClass) // Return HighPart at offset 8 in memory.
        return ABIArgInfo::getDirect(HighPart, 8);
    }
    break;
  }

  // If a high part was specified, merge it together with the low part. It is
  // known to pass in the high eightbyte of the result. We do this by forming
  // a first class struct aggregate with the high and low part: {low, high}
  if (HighPart)
    ResType = GetX86_64ByValArgumentPair(ResType, HighPart, getDataLayout());

  return ABIArgInfo::getDirect(ResType);
}
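
// Illustrative example (hypothetical type): a function returning
//   struct S { long n; double d; };
// classifies as Lo = Integer, Hi = SSE, giving the coerced type
// { i64, double }: the value comes back in %rax and %xmm0. A 32-byte struct
// instead classifies as MEMORY and is returned through a hidden sret pointer
// passed in %rdi.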

ABIArgInfo X86_64ABIInfo::classifyArgumentType(
    QualType Ty, unsigned freeIntRegs, unsigned &neededInt,
    unsigned &neededSSE, bool isNamedArg) const {
  Ty = useFirstFieldIfTransparentUnion(Ty);

  X86_64ABIInfo::Class Lo, Hi;
  classify(Ty, 0, Lo, Hi, isNamedArg);

  // Check some invariants.
  // FIXME: Enforce these by construction.
  assert((Hi != Memory || Lo == Memory) && "Invalid memory classification.");
  assert((Hi != SSEUp || Lo == SSE) && "Invalid SSEUp classification.");

  neededInt = 0;
  neededSSE = 0;
  llvm::Type *ResType = nullptr;
  switch (Lo) {
  case NoClass:
    if (Hi == NoClass)
      return ABIArgInfo::getIgnore();
    // If the low part is just padding, it takes no register, leave ResType
    // null.
    assert((Hi == SSE || Hi == Integer || Hi == X87Up) &&
           "Unknown missing lo part");
    break;

  // AMD64-ABI 3.2.3p3: Rule 1. If the class is MEMORY, pass the argument
  // on the stack.
  case Memory:

  // AMD64-ABI 3.2.3p3: Rule 5. If the class is X87, X87UP or
  // COMPLEX_X87, it is passed in memory.
  case X87:
  case ComplexX87:
    if (getRecordArgABI(Ty, getCXXABI()) == CGCXXABI::RAA_Indirect)
      ++neededInt;
    return getIndirectResult(Ty, freeIntRegs);

  case SSEUp:
  case X87Up:
    llvm_unreachable("Invalid classification for lo word.");

  // AMD64-ABI 3.2.3p3: Rule 2. If the class is INTEGER, the next
  // available register of the sequence %rdi, %rsi, %rdx, %rcx, %r8
  // and %r9 is used.
  case Integer:
    ++neededInt;

    // Pick an 8-byte type based on the preferred type.
    ResType = GetINTEGERTypeAtOffset(CGT.ConvertType(Ty), 0, Ty, 0);

    // If we have a sign or zero extended integer, make sure to return Extend
    // so that the parameter gets the right LLVM IR attributes.
    if (Hi == NoClass && isa<llvm::IntegerType>(ResType)) {
      // Treat an enum type as its underlying type.
      if (const EnumType *EnumTy = Ty->getAs<EnumType>())
        Ty = EnumTy->getDecl()->getIntegerType();

      if (Ty->isIntegralOrEnumerationType() &&
          Ty->isPromotableIntegerType())
        return ABIArgInfo::getExtend(Ty);
    }

    break;

  // AMD64-ABI 3.2.3p3: Rule 3. If the class is SSE, the next
  // available SSE register is used, the registers are taken in the
  // order from %xmm0 to %xmm7.
  case SSE: {
    llvm::Type *IRType = CGT.ConvertType(Ty);
    ResType = GetSSETypeAtOffset(IRType, 0, Ty, 0);
    ++neededSSE;
    break;
  }
  }

  llvm::Type *HighPart = nullptr;
  switch (Hi) {
  // Memory was handled previously, ComplexX87 and X87 should
  // never occur as hi classes, and X87Up must be preceded by X87,
  // which is passed in memory.
  case Memory:
  case X87:
  case ComplexX87:
    llvm_unreachable("Invalid classification for hi word.");

  case NoClass: break;

  case Integer:
    ++neededInt;
    // Pick an 8-byte type based on the preferred type.
    HighPart = GetINTEGERTypeAtOffset(CGT.ConvertType(Ty), 8, Ty, 8);

    if (Lo == NoClass) // Pass HighPart at offset 8 in memory.
      return ABIArgInfo::getDirect(HighPart, 8);
    break;

  // X87Up generally doesn't occur here (long double is passed in
  // memory), except in situations involving unions.
  case X87Up:
  case SSE:
    HighPart = GetSSETypeAtOffset(CGT.ConvertType(Ty), 8, Ty, 8);

    if (Lo == NoClass) // Pass HighPart at offset 8 in memory.
      return ABIArgInfo::getDirect(HighPart, 8);

    ++neededSSE;
    break;

  // AMD64-ABI 3.2.3p3: Rule 4. If the class is SSEUP, the
  // eightbyte is passed in the upper half of the last used SSE
  // register. This only happens when 128-bit vectors are passed.
  case SSEUp:
    assert(Lo == SSE && "Unexpected SSEUp classification");
    ResType = GetByteVectorType(Ty);
    break;
  }

  // If a high part was specified, merge it together with the low part. It is
  // known to pass in the high eightbyte of the result. We do this by forming
  // a first class struct aggregate with the high and low part: {low, high}
  if (HighPart)
    ResType = GetX86_64ByValArgumentPair(ResType, HighPart, getDataLayout());

  return ABIArgInfo::getDirect(ResType);
}
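
// Illustrative example (hypothetical type): passing
//   struct S { double d; long l; };
// as a named argument yields Lo = SSE and Hi = Integer, so neededSSE = 1,
// neededInt = 1, and the coerced IR type is { double, i64 }; the call burns
// one XMM register and one GPR for this argument.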

ABIArgInfo
X86_64ABIInfo::classifyRegCallStructTypeImpl(QualType Ty, unsigned &NeededInt,
                                             unsigned &NeededSSE) const {
  auto RT = Ty->getAs<RecordType>();
  assert(RT && "classifyRegCallStructType only valid with struct types");

  if (RT->getDecl()->hasFlexibleArrayMember())
    return getIndirectReturnResult(Ty);

  // Sum up bases.
  if (auto CXXRD = dyn_cast<CXXRecordDecl>(RT->getDecl())) {
    if (CXXRD->isDynamicClass()) {
      NeededInt = NeededSSE = 0;
      return getIndirectReturnResult(Ty);
    }

    for (const auto &I : CXXRD->bases())
      if (classifyRegCallStructTypeImpl(I.getType(), NeededInt, NeededSSE)
              .isIndirect()) {
        NeededInt = NeededSSE = 0;
        return getIndirectReturnResult(Ty);
      }
  }

  // Sum up members.
  for (const auto *FD : RT->getDecl()->fields()) {
    if (FD->getType()->isRecordType() && !FD->getType()->isUnionType()) {
      if (classifyRegCallStructTypeImpl(FD->getType(), NeededInt, NeededSSE)
              .isIndirect()) {
        NeededInt = NeededSSE = 0;
        return getIndirectReturnResult(Ty);
      }
    } else {
      unsigned LocalNeededInt, LocalNeededSSE;
      if (classifyArgumentType(FD->getType(), UINT_MAX, LocalNeededInt,
                               LocalNeededSSE, true)
              .isIndirect()) {
        NeededInt = NeededSSE = 0;
        return getIndirectReturnResult(Ty);
      }
      NeededInt += LocalNeededInt;
      NeededSSE += LocalNeededSSE;
    }
  }

  return ABIArgInfo::getDirect();
}

ABIArgInfo X86_64ABIInfo::classifyRegCallStructType(QualType Ty,
                                                    unsigned &NeededInt,
                                                    unsigned &NeededSSE) const {
  NeededInt = 0;
  NeededSSE = 0;

  return classifyRegCallStructTypeImpl(Ty, NeededInt, NeededSSE);
}

void X86_64ABIInfo::computeInfo(CGFunctionInfo &FI) const {
  const unsigned CallingConv = FI.getCallingConvention();
  // It is possible to force the Win64 calling convention on any x86_64 target
  // by using __attribute__((ms_abi)). In such a case, to correctly emit
  // Win64-compatible code, delegate this call to WinX86_64ABIInfo::computeInfo.
  if (CallingConv == llvm::CallingConv::Win64) {
    WinX86_64ABIInfo Win64ABIInfo(CGT, AVXLevel);
    Win64ABIInfo.computeInfo(FI);
    return;
  }

  bool IsRegCall = CallingConv == llvm::CallingConv::X86_RegCall;

  // Keep track of the number of assigned registers.
  unsigned FreeIntRegs = IsRegCall ? 11 : 6;
  unsigned FreeSSERegs = IsRegCall ? 16 : 8;
  unsigned NeededInt, NeededSSE;

  if (!::classifyReturnType(getCXXABI(), FI, *this)) {
    if (IsRegCall && FI.getReturnType()->getTypePtr()->isRecordType() &&
        !FI.getReturnType()->getTypePtr()->isUnionType()) {
      FI.getReturnInfo() =
          classifyRegCallStructType(FI.getReturnType(), NeededInt, NeededSSE);
      if (FreeIntRegs >= NeededInt && FreeSSERegs >= NeededSSE) {
        FreeIntRegs -= NeededInt;
        FreeSSERegs -= NeededSSE;
      } else {
        FI.getReturnInfo() = getIndirectReturnResult(FI.getReturnType());
      }
    } else if (IsRegCall && FI.getReturnType()->getAs<ComplexType>()) {
      // Complex long double is returned in memory when the regcall calling
      // convention is used.
      const ComplexType *CT = FI.getReturnType()->getAs<ComplexType>();
      if (getContext().getCanonicalType(CT->getElementType()) ==
          getContext().LongDoubleTy)
        FI.getReturnInfo() = getIndirectReturnResult(FI.getReturnType());
    } else
      FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
  }

  // If the return value is indirect, then the hidden argument is consuming
  // one integer register.
  if (FI.getReturnInfo().isIndirect())
    --FreeIntRegs;

  // The chain argument effectively gives us another free register.
  if (FI.isChainCall())
    ++FreeIntRegs;

  unsigned NumRequiredArgs = FI.getNumRequiredArgs();
  // AMD64-ABI 3.2.3p3: Once arguments are classified, the registers
  // get assigned (in left-to-right order) for passing as follows...
  unsigned ArgNo = 0;
  for (CGFunctionInfo::arg_iterator it = FI.arg_begin(), ie = FI.arg_end();
       it != ie; ++it, ++ArgNo) {
    bool IsNamedArg = ArgNo < NumRequiredArgs;

    if (IsRegCall && it->type->isStructureOrClassType())
      it->info = classifyRegCallStructType(it->type, NeededInt, NeededSSE);
    else
      it->info = classifyArgumentType(it->type, FreeIntRegs, NeededInt,
                                      NeededSSE, IsNamedArg);

    // AMD64-ABI 3.2.3p3: If there are no registers available for any
    // eightbyte of an argument, the whole argument is passed on the
    // stack. If registers have already been assigned for some
    // eightbytes of such an argument, the assignments get reverted.
    if (FreeIntRegs >= NeededInt && FreeSSERegs >= NeededSSE) {
      FreeIntRegs -= NeededInt;
      FreeSSERegs -= NeededSSE;
    } else {
      it->info = getIndirectResult(it->type, FreeIntRegs);
    }
  }
}
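
// Illustrative walk-through: once earlier arguments have consumed all six
// GPRs (%rdi, %rsi, %rdx, %rcx, %r8, %r9), a later two-eightbyte
// Integer/Integer struct computes NeededInt = 2 > FreeIntRegs = 0, so the
// else branch above calls getIndirectResult() and the whole argument goes to
// the stack, matching the "assignments get reverted" rule quoted above.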

static Address EmitX86_64VAArgFromMemory(CodeGenFunction &CGF,
                                         Address VAListAddr, QualType Ty) {
  Address overflow_arg_area_p =
      CGF.Builder.CreateStructGEP(VAListAddr, 2, "overflow_arg_area_p");
  llvm::Value *overflow_arg_area =
      CGF.Builder.CreateLoad(overflow_arg_area_p, "overflow_arg_area");

  // AMD64-ABI 3.5.7p5: Step 7. Align l->overflow_arg_area upwards to a 16
  // byte boundary if alignment needed by type exceeds 8 byte boundary.
  // It isn't stated explicitly in the standard, but in practice we use
  // alignment greater than 16 where necessary.
  CharUnits Align = CGF.getContext().getTypeAlignInChars(Ty);
  if (Align > CharUnits::fromQuantity(8)) {
    overflow_arg_area = emitRoundPointerUpToAlignment(CGF, overflow_arg_area,
                                                      Align);
  }

  // AMD64-ABI 3.5.7p5: Step 8. Fetch type from l->overflow_arg_area.
  llvm::Type *LTy = CGF.ConvertTypeForMem(Ty);
  llvm::Value *Res =
      CGF.Builder.CreateBitCast(overflow_arg_area,
                                llvm::PointerType::getUnqual(LTy));

  // AMD64-ABI 3.5.7p5: Step 9. Set l->overflow_arg_area to:
  // l->overflow_arg_area + sizeof(type).
  // AMD64-ABI 3.5.7p5: Step 10. Align l->overflow_arg_area upwards to
  // an 8 byte boundary.
  uint64_t SizeInBytes = (CGF.getContext().getTypeSize(Ty) + 7) / 8;
  llvm::Value *Offset =
      llvm::ConstantInt::get(CGF.Int32Ty, (SizeInBytes + 7) & ~7);
  overflow_arg_area = CGF.Builder.CreateGEP(overflow_arg_area, Offset,
                                            "overflow_arg_area.next");
  CGF.Builder.CreateStore(overflow_arg_area, overflow_arg_area_p);

  // AMD64-ABI 3.5.7p5: Step 11. Return the fetched type.
  return Address(Res, Align);
}
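
// Illustrative arithmetic: for a 12-byte type, SizeInBytes = 12 and
// (SizeInBytes + 7) & ~7 rounds the overflow_arg_area increment up to 16,
// keeping the pointer 8-byte aligned for the next va_arg fetch.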

Address X86_64ABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
                                 QualType Ty) const {
  // Assume that va_list type is correct; should be pointer to LLVM type:
  // struct {
  //   i32 gp_offset;
  //   i32 fp_offset;
  //   i8* overflow_arg_area;
  //   i8* reg_save_area;
  // };
  unsigned neededInt, neededSSE;

  Ty = getContext().getCanonicalType(Ty);
  ABIArgInfo AI = classifyArgumentType(Ty, 0, neededInt, neededSSE,
                                       /*isNamedArg*/false);

  // AMD64-ABI 3.5.7p5: Step 1. Determine whether type may be passed
  // in the registers. If not go to step 7.
  if (!neededInt && !neededSSE)
    return EmitX86_64VAArgFromMemory(CGF, VAListAddr, Ty);

  // AMD64-ABI 3.5.7p5: Step 2. Compute num_gp to hold the number of
  // general purpose registers needed to pass type and num_fp to hold
  // the number of floating point registers needed.

  // AMD64-ABI 3.5.7p5: Step 3. Verify whether arguments fit into
  // registers. In the case: l->gp_offset > 48 - num_gp * 8 or
  // l->fp_offset > 304 - num_fp * 16 go to step 7.
  //
  // NOTE: 304 is a typo in the ABI document; there are only
  // (6 * 8 + 8 * 16) = 176 bytes of register save space.
  llvm::Value *InRegs = nullptr;
  Address gp_offset_p = Address::invalid(), fp_offset_p = Address::invalid();
  llvm::Value *gp_offset = nullptr, *fp_offset = nullptr;
  if (neededInt) {
    gp_offset_p = CGF.Builder.CreateStructGEP(VAListAddr, 0, "gp_offset_p");
    gp_offset = CGF.Builder.CreateLoad(gp_offset_p, "gp_offset");
    InRegs = llvm::ConstantInt::get(CGF.Int32Ty, 48 - neededInt * 8);
    InRegs = CGF.Builder.CreateICmpULE(gp_offset, InRegs, "fits_in_gp");
  }

  if (neededSSE) {
    fp_offset_p = CGF.Builder.CreateStructGEP(VAListAddr, 1, "fp_offset_p");
    fp_offset = CGF.Builder.CreateLoad(fp_offset_p, "fp_offset");
    llvm::Value *FitsInFP =
        llvm::ConstantInt::get(CGF.Int32Ty, 176 - neededSSE * 16);
    FitsInFP = CGF.Builder.CreateICmpULE(fp_offset, FitsInFP, "fits_in_fp");
    InRegs = InRegs ? CGF.Builder.CreateAnd(InRegs, FitsInFP) : FitsInFP;
  }

  llvm::BasicBlock *InRegBlock = CGF.createBasicBlock("vaarg.in_reg");
  llvm::BasicBlock *InMemBlock = CGF.createBasicBlock("vaarg.in_mem");
  llvm::BasicBlock *ContBlock = CGF.createBasicBlock("vaarg.end");
  CGF.Builder.CreateCondBr(InRegs, InRegBlock, InMemBlock);

  // Emit code to load the value if it was passed in registers.
  CGF.EmitBlock(InRegBlock);

  // AMD64-ABI 3.5.7p5: Step 4. Fetch type from l->reg_save_area with
  // an offset of l->gp_offset and/or l->fp_offset. This may require
  // copying to a temporary location in case the parameter is passed
  // in different register classes or requires an alignment greater
  // than 8 for general purpose registers and 16 for XMM registers.
  //
  // FIXME: This really results in shameful code when we end up needing to
  // collect arguments from different places; often what should result in a
  // simple assembling of a structure from scattered addresses has many more
  // loads than necessary. Can we clean this up?
  llvm::Type *LTy = CGF.ConvertTypeForMem(Ty);
  llvm::Value *RegSaveArea = CGF.Builder.CreateLoad(
      CGF.Builder.CreateStructGEP(VAListAddr, 3), "reg_save_area");

  Address RegAddr = Address::invalid();
  if (neededInt && neededSSE) {
    // FIXME: Cleanup.
    assert(AI.isDirect() && "Unexpected ABI info for mixed regs");
    llvm::StructType *ST = cast<llvm::StructType>(AI.getCoerceToType());
    Address Tmp = CGF.CreateMemTemp(Ty);
    Tmp = CGF.Builder.CreateElementBitCast(Tmp, ST);
    assert(ST->getNumElements() == 2 && "Unexpected ABI info for mixed regs");
    llvm::Type *TyLo = ST->getElementType(0);
    llvm::Type *TyHi = ST->getElementType(1);
    assert((TyLo->isFPOrFPVectorTy() ^ TyHi->isFPOrFPVectorTy()) &&
           "Unexpected ABI info for mixed regs");
    llvm::Type *PTyLo = llvm::PointerType::getUnqual(TyLo);
    llvm::Type *PTyHi = llvm::PointerType::getUnqual(TyHi);
    llvm::Value *GPAddr = CGF.Builder.CreateGEP(RegSaveArea, gp_offset);
    llvm::Value *FPAddr = CGF.Builder.CreateGEP(RegSaveArea, fp_offset);
    llvm::Value *RegLoAddr = TyLo->isFPOrFPVectorTy() ? FPAddr : GPAddr;
    llvm::Value *RegHiAddr = TyLo->isFPOrFPVectorTy() ? GPAddr : FPAddr;

    // Copy the first element.
    // FIXME: Our choice of alignment here and below is probably pessimistic.
    llvm::Value *V = CGF.Builder.CreateAlignedLoad(
        TyLo, CGF.Builder.CreateBitCast(RegLoAddr, PTyLo),
        CharUnits::fromQuantity(getDataLayout().getABITypeAlignment(TyLo)));
    CGF.Builder.CreateStore(V, CGF.Builder.CreateStructGEP(Tmp, 0));

    // Copy the second element.
    V = CGF.Builder.CreateAlignedLoad(
        TyHi, CGF.Builder.CreateBitCast(RegHiAddr, PTyHi),
        CharUnits::fromQuantity(getDataLayout().getABITypeAlignment(TyHi)));
    CGF.Builder.CreateStore(V, CGF.Builder.CreateStructGEP(Tmp, 1));

    RegAddr = CGF.Builder.CreateElementBitCast(Tmp, LTy);
  } else if (neededInt) {
    RegAddr = Address(CGF.Builder.CreateGEP(RegSaveArea, gp_offset),
                      CharUnits::fromQuantity(8));
    RegAddr = CGF.Builder.CreateElementBitCast(RegAddr, LTy);

    // Copy to a temporary if necessary to ensure the appropriate alignment.
    std::pair<CharUnits, CharUnits> SizeAlign =
        getContext().getTypeInfoInChars(Ty);
    uint64_t TySize = SizeAlign.first.getQuantity();
    CharUnits TyAlign = SizeAlign.second;

    // Copy into a temporary if the type is more aligned than the
    // register save area.
    if (TyAlign.getQuantity() > 8) {
      Address Tmp = CGF.CreateMemTemp(Ty);
      CGF.Builder.CreateMemCpy(Tmp, RegAddr, TySize, false);
      RegAddr = Tmp;
    }

  } else if (neededSSE == 1) {
    RegAddr = Address(CGF.Builder.CreateGEP(RegSaveArea, fp_offset),
                      CharUnits::fromQuantity(16));
    RegAddr = CGF.Builder.CreateElementBitCast(RegAddr, LTy);
  } else {
    assert(neededSSE == 2 && "Invalid number of needed registers!");
    // SSE registers are spaced 16 bytes apart in the register save
    // area, we need to collect the two eightbytes together.
    // The ABI isn't explicit about this, but it seems reasonable
    // to assume that the slots are 16-byte aligned, since the stack is
    // naturally 16-byte aligned and the prologue is expected to store
    // all the SSE registers to the RSA.
    Address RegAddrLo = Address(CGF.Builder.CreateGEP(RegSaveArea, fp_offset),
                                CharUnits::fromQuantity(16));
    Address RegAddrHi =
        CGF.Builder.CreateConstInBoundsByteGEP(RegAddrLo,
                                               CharUnits::fromQuantity(16));
    llvm::Type *ST = AI.canHaveCoerceToType()
                         ? AI.getCoerceToType()
                         : llvm::StructType::get(CGF.DoubleTy, CGF.DoubleTy);
    llvm::Value *V;
    Address Tmp = CGF.CreateMemTemp(Ty);
    Tmp = CGF.Builder.CreateElementBitCast(Tmp, ST);
    V = CGF.Builder.CreateLoad(CGF.Builder.CreateElementBitCast(
        RegAddrLo, ST->getStructElementType(0)));
    CGF.Builder.CreateStore(V, CGF.Builder.CreateStructGEP(Tmp, 0));
    V = CGF.Builder.CreateLoad(CGF.Builder.CreateElementBitCast(
        RegAddrHi, ST->getStructElementType(1)));
    CGF.Builder.CreateStore(V, CGF.Builder.CreateStructGEP(Tmp, 1));

    RegAddr = CGF.Builder.CreateElementBitCast(Tmp, LTy);
  }

  // AMD64-ABI 3.5.7p5: Step 5. Set:
  // l->gp_offset = l->gp_offset + num_gp * 8
  // l->fp_offset = l->fp_offset + num_fp * 16.
  if (neededInt) {
    llvm::Value *Offset = llvm::ConstantInt::get(CGF.Int32Ty, neededInt * 8);
    CGF.Builder.CreateStore(CGF.Builder.CreateAdd(gp_offset, Offset),
                            gp_offset_p);
  }
  if (neededSSE) {
    llvm::Value *Offset = llvm::ConstantInt::get(CGF.Int32Ty, neededSSE * 16);
    CGF.Builder.CreateStore(CGF.Builder.CreateAdd(fp_offset, Offset),
                            fp_offset_p);
  }
  CGF.EmitBranch(ContBlock);

  // Emit code to load the value if it was passed in memory.
  CGF.EmitBlock(InMemBlock);
  Address MemAddr = EmitX86_64VAArgFromMemory(CGF, VAListAddr, Ty);

  // Return the appropriate result.
  CGF.EmitBlock(ContBlock);
  Address ResAddr = emitMergePHI(CGF, RegAddr, InRegBlock, MemAddr, InMemBlock,
                                 "vaarg.addr");
  return ResAddr;
}

Address X86_64ABIInfo::EmitMSVAArg(CodeGenFunction &CGF, Address VAListAddr,
                                   QualType Ty) const {
  return emitVoidPtrVAArg(CGF, VAListAddr, Ty, /*indirect*/ false,
                          CGF.getContext().getTypeInfoInChars(Ty),
                          CharUnits::fromQuantity(8),
                          /*allowHigherAlign*/ false);
}

ABIArgInfo
WinX86_64ABIInfo::reclassifyHvaArgType(QualType Ty, unsigned &FreeSSERegs,
                                       const ABIArgInfo &current) const {
  // Assumes vectorCall calling convention.
  const Type *Base = nullptr;
  uint64_t NumElts = 0;

  if (!Ty->isBuiltinType() && !Ty->isVectorType() &&
      isHomogeneousAggregate(Ty, Base, NumElts) && FreeSSERegs >= NumElts) {
    FreeSSERegs -= NumElts;
    return getDirectX86Hva();
  }
  return current;
}

ABIArgInfo WinX86_64ABIInfo::classify(QualType Ty, unsigned &FreeSSERegs,
                                      bool IsReturnType, bool IsVectorCall,
                                      bool IsRegCall) const {

  if (Ty->isVoidType())
    return ABIArgInfo::getIgnore();

  if (const EnumType *EnumTy = Ty->getAs<EnumType>())
    Ty = EnumTy->getDecl()->getIntegerType();

  TypeInfo Info = getContext().getTypeInfo(Ty);
  uint64_t Width = Info.Width;
  CharUnits Align = getContext().toCharUnitsFromBits(Info.Align);

  const RecordType *RT = Ty->getAs<RecordType>();
  if (RT) {
    if (!IsReturnType) {
      if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(RT, getCXXABI()))
        return getNaturalAlignIndirect(Ty, RAA == CGCXXABI::RAA_DirectInMemory);
    }

    if (RT->getDecl()->hasFlexibleArrayMember())
      return getNaturalAlignIndirect(Ty, /*ByVal=*/false);
  }

  const Type *Base = nullptr;
  uint64_t NumElts = 0;
  // vectorcall adds the concept of a homogeneous vector aggregate, similar to
  // other targets.
  if ((IsVectorCall || IsRegCall) &&
      isHomogeneousAggregate(Ty, Base, NumElts)) {
    if (IsRegCall) {
      if (FreeSSERegs >= NumElts) {
        FreeSSERegs -= NumElts;
        if (IsReturnType || Ty->isBuiltinType() || Ty->isVectorType())
          return ABIArgInfo::getDirect();
        return ABIArgInfo::getExpand();
      }
      return ABIArgInfo::getIndirect(Align, /*ByVal=*/false);
    } else if (IsVectorCall) {
      if (FreeSSERegs >= NumElts &&
          (IsReturnType || Ty->isBuiltinType() || Ty->isVectorType())) {
        FreeSSERegs -= NumElts;
        return ABIArgInfo::getDirect();
      } else if (IsReturnType) {
        return ABIArgInfo::getExpand();
      } else if (!Ty->isBuiltinType() && !Ty->isVectorType()) {
        // HVAs are delayed and reclassified in the 2nd step.
        return ABIArgInfo::getIndirect(Align, /*ByVal=*/false);
      }
    }
  }

  if (Ty->isMemberPointerType()) {
    // If the member pointer is represented by an LLVM int or ptr, pass it
    // directly.
    llvm::Type *LLTy = CGT.ConvertType(Ty);
    if (LLTy->isPointerTy() || LLTy->isIntegerTy())
      return ABIArgInfo::getDirect();
  }

  if (RT || Ty->isAnyComplexType() || Ty->isMemberPointerType()) {
    // MS x64 ABI requirement: "Any argument that doesn't fit in 8 bytes, or
    // is not 1, 2, 4, or 8 bytes, must be passed by reference."
    if (Width > 64 || !llvm::isPowerOf2_64(Width))
      return getNaturalAlignIndirect(Ty, /*ByVal=*/false);

    // Otherwise, coerce it to a small integer.
    return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(), Width));
  }

  if (const BuiltinType *BT = Ty->getAs<BuiltinType>()) {
    switch (BT->getKind()) {
    case BuiltinType::Bool:
      // Bool type is always extended to the ABI; other builtin types are not
      // extended.
      return ABIArgInfo::getExtend(Ty);

    case BuiltinType::LongDouble:
      // Mingw64 GCC uses the old 80 bit extended precision floating point
      // unit. It passes them indirectly through memory.
      if (IsMingw64) {
        const llvm::fltSemantics *LDF = &getTarget().getLongDoubleFormat();
        if (LDF == &llvm::APFloat::x87DoubleExtended())
          return ABIArgInfo::getIndirect(Align, /*ByVal=*/false);
      }
      break;

    case BuiltinType::Int128:
    case BuiltinType::UInt128:
      // If it's a parameter type, the normal ABI rule is that arguments
      // larger than 8 bytes are passed indirectly. GCC follows it. We follow
      // it too, even though it isn't particularly efficient.
      if (!IsReturnType)
        return ABIArgInfo::getIndirect(Align, /*ByVal=*/false);

      // Mingw64 GCC returns i128 in XMM0. Coerce to v2i64 to handle that.
      // Clang matches them for compatibility.
      return ABIArgInfo::getDirect(
          llvm::VectorType::get(llvm::Type::getInt64Ty(getVMContext()), 2));

    default:
      break;
    }
  }

  return ABIArgInfo::getDirect();
}
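
// Illustrative example (hypothetical types): under the MS x64 rule quoted
// above, a 12-byte struct is passed by reference (its width of 96 bits
// exceeds 64), while an 8-byte struct { int a, b; } is coerced to a single
// i64 and passed in a GPR.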

void WinX86_64ABIInfo::computeVectorCallArgs(CGFunctionInfo &FI,
                                             unsigned FreeSSERegs,
                                             bool IsVectorCall,
                                             bool IsRegCall) const {
  unsigned Count = 0;
  for (auto &I : FI.arguments()) {
    // Vectorcall in x64 only permits the first 6 arguments to be passed
    // as XMM/YMM registers.
    if (Count < VectorcallMaxParamNumAsReg)
      I.info = classify(I.type, FreeSSERegs, false, IsVectorCall, IsRegCall);
    else {
      // Since these cannot be passed in registers, pretend no registers
      // are left.
      unsigned ZeroSSERegsAvail = 0;
      I.info = classify(I.type, /*FreeSSERegs=*/ZeroSSERegsAvail, false,
                        IsVectorCall, IsRegCall);
    }
    ++Count;
  }

  for (auto &I : FI.arguments()) {
    I.info = reclassifyHvaArgType(I.type, FreeSSERegs, I.info);
  }
}

void WinX86_64ABIInfo::computeInfo(CGFunctionInfo &FI) const {
  const unsigned CC = FI.getCallingConvention();
  bool IsVectorCall = CC == llvm::CallingConv::X86_VectorCall;
  bool IsRegCall = CC == llvm::CallingConv::X86_RegCall;

  // If __attribute__((sysv_abi)) is in use, use the SysV argument
  // classification rules.
  if (CC == llvm::CallingConv::X86_64_SysV) {
    X86_64ABIInfo SysVABIInfo(CGT, AVXLevel);
    SysVABIInfo.computeInfo(FI);
    return;
  }

  unsigned FreeSSERegs = 0;
  if (IsVectorCall) {
    // We can use up to 4 SSE return registers with vectorcall.
    FreeSSERegs = 4;
  } else if (IsRegCall) {
    // RegCall gives us 16 SSE registers.
    FreeSSERegs = 16;
  }

  if (!getCXXABI().classifyReturnType(FI))
    FI.getReturnInfo() = classify(FI.getReturnType(), FreeSSERegs, true,
                                  IsVectorCall, IsRegCall);

  if (IsVectorCall) {
    // We can use up to 6 SSE register parameters with vectorcall.
    FreeSSERegs = 6;
  } else if (IsRegCall) {
    // RegCall gives us 16 SSE registers, we can reuse the return registers.
    FreeSSERegs = 16;
  }

  if (IsVectorCall) {
    computeVectorCallArgs(FI, FreeSSERegs, IsVectorCall, IsRegCall);
  } else {
    for (auto &I : FI.arguments())
      I.info = classify(I.type, FreeSSERegs, false, IsVectorCall, IsRegCall);
  }
}

Address WinX86_64ABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
                                    QualType Ty) const {
  bool IsIndirect = false;

  // MS x64 ABI requirement: "Any argument that doesn't fit in 8 bytes, or is
  // not 1, 2, 4, or 8 bytes, must be passed by reference."
  if (isAggregateTypeForABI(Ty) || Ty->isMemberPointerType()) {
    uint64_t Width = getContext().getTypeSize(Ty);
    IsIndirect = Width > 64 || !llvm::isPowerOf2_64(Width);
  }

  return emitVoidPtrVAArg(CGF, VAListAddr, Ty, IsIndirect,
                          CGF.getContext().getTypeInfoInChars(Ty),
                          CharUnits::fromQuantity(8),
                          /*allowHigherAlign*/ false);
}
4120
4121 // PowerPC-32
4122 namespace {
4123 /// PPC32_SVR4_ABIInfo - The 32-bit PowerPC ELF (SVR4) ABI information.
4124 class PPC32_SVR4_ABIInfo : public DefaultABIInfo {
4125 bool IsSoftFloatABI;
4126
4127 CharUnits getParamTypeAlignment(QualType Ty) const;
4128
4129 public:
PPC32_SVR4_ABIInfo(CodeGen::CodeGenTypes & CGT,bool SoftFloatABI)4130 PPC32_SVR4_ABIInfo(CodeGen::CodeGenTypes &CGT, bool SoftFloatABI)
4131 : DefaultABIInfo(CGT), IsSoftFloatABI(SoftFloatABI) {}
4132
4133 Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
4134 QualType Ty) const override;
4135 };
4136
4137 class PPC32TargetCodeGenInfo : public TargetCodeGenInfo {
4138 public:
PPC32TargetCodeGenInfo(CodeGenTypes & CGT,bool SoftFloatABI)4139 PPC32TargetCodeGenInfo(CodeGenTypes &CGT, bool SoftFloatABI)
4140 : TargetCodeGenInfo(new PPC32_SVR4_ABIInfo(CGT, SoftFloatABI)) {}
4141
getDwarfEHStackPointer(CodeGen::CodeGenModule & M) const4142 int getDwarfEHStackPointer(CodeGen::CodeGenModule &M) const override {
4143 // This is recovered from gcc output.
4144 return 1; // r1 is the dedicated stack pointer
4145 }
4146
4147 bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
4148 llvm::Value *Address) const override;
4149 };
4150 }

CharUnits PPC32_SVR4_ABIInfo::getParamTypeAlignment(QualType Ty) const {
  // Complex types are passed just like their elements.
  if (const ComplexType *CTy = Ty->getAs<ComplexType>())
    Ty = CTy->getElementType();

  if (Ty->isVectorType())
    return CharUnits::fromQuantity(getContext().getTypeSize(Ty) == 128 ? 16
                                                                       : 4);

  // For single-element float/vector structs, we consider the whole type
  // to have the same alignment requirements as its single element.
  const Type *AlignTy = nullptr;
  if (const Type *EltType = isSingleElementStruct(Ty, getContext())) {
    const BuiltinType *BT = EltType->getAs<BuiltinType>();
    if ((EltType->isVectorType() && getContext().getTypeSize(EltType) == 128) ||
        (BT && BT->isFloatingPoint()))
      AlignTy = EltType;
  }

  if (AlignTy)
    return CharUnits::fromQuantity(AlignTy->isVectorType() ? 16 : 4);
  return CharUnits::fromQuantity(4);
}
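
// Illustrative example (hypothetical types): a 128-bit vector parameter
// reports 16-byte alignment here, while struct { double d; }, a
// single-element float struct, reports 4, the same as any other non-vector
// parameter under this convention.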

// TODO: this implementation is now likely redundant with
// DefaultABIInfo::EmitVAArg.
Address PPC32_SVR4_ABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAList,
                                      QualType Ty) const {
  if (getTarget().getTriple().isOSDarwin()) {
    auto TI = getContext().getTypeInfoInChars(Ty);
    TI.second = getParamTypeAlignment(Ty);

    CharUnits SlotSize = CharUnits::fromQuantity(4);
    return emitVoidPtrVAArg(CGF, VAList, Ty,
                            classifyArgumentType(Ty).isIndirect(), TI, SlotSize,
                            /*AllowHigherAlign=*/true);
  }

  const unsigned OverflowLimit = 8;
  if (const ComplexType *CTy = Ty->getAs<ComplexType>()) {
    // TODO: Implement this. For now ignore.
    (void)CTy;
    return Address::invalid(); // FIXME?
  }

  // struct __va_list_tag {
  //   unsigned char gpr;
  //   unsigned char fpr;
  //   unsigned short reserved;
  //   void *overflow_arg_area;
  //   void *reg_save_area;
  // };
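  //
  // An illustrative snapshot (an assumption about a typical call, not text
  // from the ABI document): after "void f(int a, double b, ...)" has
  // consumed its fixed arguments, gpr == 1 and fpr == 1, so the next
  // integer va_arg reads from reg_save_area + 1 * 4 and the next double
  // from the FPR section at reg_save_area + 32 + 1 * 8.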

  bool isI64 = Ty->isIntegerType() && getContext().getTypeSize(Ty) == 64;
  bool isInt =
      Ty->isIntegerType() || Ty->isPointerType() || Ty->isAggregateType();
  bool isF64 = Ty->isFloatingType() && getContext().getTypeSize(Ty) == 64;

  // All aggregates are passed indirectly? That doesn't seem consistent
  // with the argument-lowering code.
  bool isIndirect = Ty->isAggregateType();

  CGBuilderTy &Builder = CGF.Builder;

  // The calling convention either uses 1-2 GPRs or 1 FPR.
  Address NumRegsAddr = Address::invalid();
  if (isInt || IsSoftFloatABI) {
    NumRegsAddr = Builder.CreateStructGEP(VAList, 0, "gpr");
  } else {
    NumRegsAddr = Builder.CreateStructGEP(VAList, 1, "fpr");
  }

  llvm::Value *NumRegs = Builder.CreateLoad(NumRegsAddr, "numUsedRegs");

  // "Align" the register count when TY is i64.
  if (isI64 || (isF64 && IsSoftFloatABI)) {
    NumRegs = Builder.CreateAdd(NumRegs, Builder.getInt8(1));
    NumRegs = Builder.CreateAnd(NumRegs, Builder.getInt8((uint8_t) ~1U));
  }
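
  // E.g. (a sketch of the arithmetic above, not ABI text): a 64-bit value
  // must start in an even-numbered register pair, so NumRegs == 3 becomes
  // (3 + 1) & ~1 == 4 before the overflow check below.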

  llvm::Value *CC =
      Builder.CreateICmpULT(NumRegs, Builder.getInt8(OverflowLimit), "cond");

  llvm::BasicBlock *UsingRegs = CGF.createBasicBlock("using_regs");
  llvm::BasicBlock *UsingOverflow = CGF.createBasicBlock("using_overflow");
  llvm::BasicBlock *Cont = CGF.createBasicBlock("cont");

  Builder.CreateCondBr(CC, UsingRegs, UsingOverflow);

  llvm::Type *DirectTy = CGF.ConvertType(Ty);
  if (isIndirect) DirectTy = DirectTy->getPointerTo(0);

  // Case 1: consume registers.
  Address RegAddr = Address::invalid();
  {
    CGF.EmitBlock(UsingRegs);

    Address RegSaveAreaPtr = Builder.CreateStructGEP(VAList, 4);
    RegAddr = Address(Builder.CreateLoad(RegSaveAreaPtr),
                      CharUnits::fromQuantity(8));
    assert(RegAddr.getElementType() == CGF.Int8Ty);

    // Floating-point registers start after the general-purpose registers.
    if (!(isInt || IsSoftFloatABI)) {
      RegAddr = Builder.CreateConstInBoundsByteGEP(RegAddr,
                                                   CharUnits::fromQuantity(32));
    }

    // Get the address of the saved value by scaling the number of
    // registers we've used by the size of each register.
    CharUnits RegSize = CharUnits::fromQuantity((isInt || IsSoftFloatABI) ? 4 : 8);
    llvm::Value *RegOffset =
        Builder.CreateMul(NumRegs, Builder.getInt8(RegSize.getQuantity()));
    RegAddr = Address(Builder.CreateInBoundsGEP(CGF.Int8Ty,
                                                RegAddr.getPointer(), RegOffset),
                      RegAddr.getAlignment().alignmentOfArrayElement(RegSize));
    RegAddr = Builder.CreateElementBitCast(RegAddr, DirectTy);

    // Increase the used-register count.
    NumRegs =
        Builder.CreateAdd(NumRegs,
                          Builder.getInt8((isI64 || (isF64 && IsSoftFloatABI)) ? 2 : 1));
    Builder.CreateStore(NumRegs, NumRegsAddr);

    CGF.EmitBranch(Cont);
  }

  // Case 2: consume space in the overflow area.
  Address MemAddr = Address::invalid();
  {
    CGF.EmitBlock(UsingOverflow);

    Builder.CreateStore(Builder.getInt8(OverflowLimit), NumRegsAddr);

    // Everything in the overflow area is rounded up to a size of at least 4.
    CharUnits OverflowAreaAlign = CharUnits::fromQuantity(4);

    CharUnits Size;
    if (!isIndirect) {
      auto TypeInfo = CGF.getContext().getTypeInfoInChars(Ty);
      Size = TypeInfo.first.alignTo(OverflowAreaAlign);
    } else {
      Size = CGF.getPointerSize();
    }

    Address OverflowAreaAddr = Builder.CreateStructGEP(VAList, 3);
    Address OverflowArea(Builder.CreateLoad(OverflowAreaAddr, "argp.cur"),
                         OverflowAreaAlign);
    // Round up address of argument to alignment.
    CharUnits Align = CGF.getContext().getTypeAlignInChars(Ty);
    if (Align > OverflowAreaAlign) {
      llvm::Value *Ptr = OverflowArea.getPointer();
      OverflowArea = Address(emitRoundPointerUpToAlignment(CGF, Ptr, Align),
                             Align);
    }

    MemAddr = Builder.CreateElementBitCast(OverflowArea, DirectTy);

    // Increase the overflow area.
    OverflowArea = Builder.CreateConstInBoundsByteGEP(OverflowArea, Size);
    Builder.CreateStore(OverflowArea.getPointer(), OverflowAreaAddr);
    CGF.EmitBranch(Cont);
  }

  CGF.EmitBlock(Cont);

  // Merge the cases with a phi.
  Address Result = emitMergePHI(CGF, RegAddr, UsingRegs, MemAddr, UsingOverflow,
                                "vaarg.addr");

  // Load the pointer if the argument was passed indirectly.
  if (isIndirect) {
    Result = Address(Builder.CreateLoad(Result, "aggr"),
                     getContext().getTypeAlignInChars(Ty));
  }

  return Result;
}

bool
PPC32TargetCodeGenInfo::initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
                                                llvm::Value *Address) const {
  // This is calculated from the LLVM and GCC tables and verified
  // against gcc output. AFAIK all ABIs use the same encoding.

  CodeGen::CGBuilderTy &Builder = CGF.Builder;

  llvm::IntegerType *i8 = CGF.Int8Ty;
  llvm::Value *Four8 = llvm::ConstantInt::get(i8, 4);
  llvm::Value *Eight8 = llvm::ConstantInt::get(i8, 8);
  llvm::Value *Sixteen8 = llvm::ConstantInt::get(i8, 16);

  // 0-31: r0-31, the 4-byte general-purpose registers
  AssignToArrayRange(Builder, Address, Four8, 0, 31);

  // 32-63: fp0-31, the 8-byte floating-point registers
  AssignToArrayRange(Builder, Address, Eight8, 32, 63);

  // 64-76 are various 4-byte special-purpose registers:
  // 64: mq
  // 65: lr
  // 66: ctr
  // 67: ap
  // 68-75: cr0-7
  // 76: xer
  AssignToArrayRange(Builder, Address, Four8, 64, 76);

  // 77-108: v0-31, the 16-byte vector registers
  AssignToArrayRange(Builder, Address, Sixteen8, 77, 108);

  // 109: vrsave
  // 110: vscr
  // 111: spe_acc
  // 112: spefscr
  // 113: sfp
  AssignToArrayRange(Builder, Address, Four8, 109, 113);

  return false;
}

//===----------------------------------------------------------------------===//
// PowerPC-64 ABI Implementation
//===----------------------------------------------------------------------===//

namespace {
/// PPC64_SVR4_ABIInfo - The 64-bit PowerPC ELF (SVR4) ABI information.
class PPC64_SVR4_ABIInfo : public SwiftABIInfo {
public:
  enum ABIKind {
    ELFv1 = 0,
    ELFv2
  };

private:
  static const unsigned GPRBits = 64;
  ABIKind Kind;
  bool HasQPX;
  bool IsSoftFloatABI;

  // A vector of float or double will be promoted to <4 x f32> or <4 x f64> and
  // will be passed in a QPX register.
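  //
  // E.g. (illustrative): with QPX, "typedef double v4d
  // __attribute__((vector_size(32)));" is a 256-bit vector of double and is
  // accepted below, while a 512-bit vector of double is not.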
  bool IsQPXVectorTy(const Type *Ty) const {
    if (!HasQPX)
      return false;

    if (const VectorType *VT = Ty->getAs<VectorType>()) {
      unsigned NumElements = VT->getNumElements();
      if (NumElements == 1)
        return false;

      if (VT->getElementType()->isSpecificBuiltinType(BuiltinType::Double)) {
        if (getContext().getTypeSize(Ty) <= 256)
          return true;
      } else if (VT->getElementType()->
                     isSpecificBuiltinType(BuiltinType::Float)) {
        if (getContext().getTypeSize(Ty) <= 128)
          return true;
      }
    }

    return false;
  }

  bool IsQPXVectorTy(QualType Ty) const {
    return IsQPXVectorTy(Ty.getTypePtr());
  }

public:
  PPC64_SVR4_ABIInfo(CodeGen::CodeGenTypes &CGT, ABIKind Kind, bool HasQPX,
                     bool SoftFloatABI)
      : SwiftABIInfo(CGT), Kind(Kind), HasQPX(HasQPX),
        IsSoftFloatABI(SoftFloatABI) {}

  bool isPromotableTypeForABI(QualType Ty) const;
  CharUnits getParamTypeAlignment(QualType Ty) const;

  ABIArgInfo classifyReturnType(QualType RetTy) const;
  ABIArgInfo classifyArgumentType(QualType Ty) const;

  bool isHomogeneousAggregateBaseType(QualType Ty) const override;
  bool isHomogeneousAggregateSmallEnough(const Type *Ty,
                                         uint64_t Members) const override;

  // TODO: We can add more logic to computeInfo to improve performance.
  // Example: For aggregate arguments that fit in a register, we could
  // use getDirectInReg (as is done below for structs containing a single
  // floating-point value) to avoid pushing them to memory on function
  // entry. This would require changing the logic in PPCISelLowering
  // when lowering the parameters in the caller and args in the callee.
  void computeInfo(CGFunctionInfo &FI) const override {
    if (!getCXXABI().classifyReturnType(FI))
      FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
    for (auto &I : FI.arguments()) {
      // We rely on the default argument classification for the most part.
      // One exception: An aggregate containing a single floating-point
      // or vector item must be passed in a register if one is available.
      const Type *T = isSingleElementStruct(I.type, getContext());
      if (T) {
        const BuiltinType *BT = T->getAs<BuiltinType>();
        if (IsQPXVectorTy(T) ||
            (T->isVectorType() && getContext().getTypeSize(T) == 128) ||
            (BT && BT->isFloatingPoint())) {
          QualType QT(T, 0);
          I.info = ABIArgInfo::getDirectInReg(CGT.ConvertType(QT));
          continue;
        }
      }
      I.info = classifyArgumentType(I.type);
    }
  }

  Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
                    QualType Ty) const override;

  bool shouldPassIndirectlyForSwift(ArrayRef<llvm::Type *> scalars,
                                    bool asReturnValue) const override {
    return occupiesMoreThan(CGT, scalars, /*total*/ 4);
  }

  bool isSwiftErrorInRegister() const override {
    return false;
  }
};

class PPC64_SVR4_TargetCodeGenInfo : public TargetCodeGenInfo {
public:
  PPC64_SVR4_TargetCodeGenInfo(CodeGenTypes &CGT,
                               PPC64_SVR4_ABIInfo::ABIKind Kind, bool HasQPX,
                               bool SoftFloatABI)
      : TargetCodeGenInfo(new PPC64_SVR4_ABIInfo(CGT, Kind, HasQPX,
                                                 SoftFloatABI)) {}

  int getDwarfEHStackPointer(CodeGen::CodeGenModule &M) const override {
    // This is recovered from gcc output.
    return 1; // r1 is the dedicated stack pointer
  }

  bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
                               llvm::Value *Address) const override;
};

class PPC64TargetCodeGenInfo : public DefaultTargetCodeGenInfo {
public:
  PPC64TargetCodeGenInfo(CodeGenTypes &CGT) : DefaultTargetCodeGenInfo(CGT) {}

  int getDwarfEHStackPointer(CodeGen::CodeGenModule &M) const override {
    // This is recovered from gcc output.
    return 1; // r1 is the dedicated stack pointer
  }

  bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
                               llvm::Value *Address) const override;
};

} // namespace

// Return true if the ABI requires Ty to be passed sign- or zero-
// extended to 64 bits.
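//
// For example (illustrative): "short" and "_Bool" are promotable in the
// usual C sense, while "int" and "unsigned int" are extended here as well
// because the 64-bit ELF ABIs pass them in full doublewords.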
bool
PPC64_SVR4_ABIInfo::isPromotableTypeForABI(QualType Ty) const {
  // Treat an enum type as its underlying type.
  if (const EnumType *EnumTy = Ty->getAs<EnumType>())
    Ty = EnumTy->getDecl()->getIntegerType();

  // Promotable integer types are required to be promoted by the ABI.
  if (Ty->isPromotableIntegerType())
    return true;

  // In addition to the usual promotable integer types, we also need to
  // extend all 32-bit types, since the ABI requires promotion to 64 bits.
  if (const BuiltinType *BT = Ty->getAs<BuiltinType>())
    switch (BT->getKind()) {
    case BuiltinType::Int:
    case BuiltinType::UInt:
      return true;
    default:
      break;
    }

  return false;
}

/// getParamTypeAlignment - Determine whether a type requires 16-byte or
/// higher alignment in the parameter area. Always returns at least 8.
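///
/// E.g. (illustrative): a 16-byte Altivec vector parameter yields 16; an
/// ELFv2 homogeneous aggregate of such vectors also yields 16; a plain
/// "struct { long a, b, c; }" yields the default 8.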
CharUnits PPC64_SVR4_ABIInfo::getParamTypeAlignment(QualType Ty) const {
  // Complex types are passed just like their elements.
  if (const ComplexType *CTy = Ty->getAs<ComplexType>())
    Ty = CTy->getElementType();

  // Only vector types of size 16 bytes need alignment (larger types are
  // passed via reference, smaller types are not aligned).
  if (IsQPXVectorTy(Ty)) {
    if (getContext().getTypeSize(Ty) > 128)
      return CharUnits::fromQuantity(32);

    return CharUnits::fromQuantity(16);
  } else if (Ty->isVectorType()) {
    return CharUnits::fromQuantity(getContext().getTypeSize(Ty) == 128 ? 16
                                                                       : 8);
  }

  // For single-element float/vector structs, we consider the whole type
  // to have the same alignment requirements as its single element.
  const Type *AlignAsType = nullptr;
  const Type *EltType = isSingleElementStruct(Ty, getContext());
  if (EltType) {
    const BuiltinType *BT = EltType->getAs<BuiltinType>();
    if (IsQPXVectorTy(EltType) || (EltType->isVectorType() &&
         getContext().getTypeSize(EltType) == 128) ||
        (BT && BT->isFloatingPoint()))
      AlignAsType = EltType;
  }

  // Likewise for ELFv2 homogeneous aggregates.
  const Type *Base = nullptr;
  uint64_t Members = 0;
  if (!AlignAsType && Kind == ELFv2 &&
      isAggregateTypeForABI(Ty) && isHomogeneousAggregate(Ty, Base, Members))
    AlignAsType = Base;

  // With special case aggregates, only vector base types need alignment.
  if (AlignAsType && IsQPXVectorTy(AlignAsType)) {
    if (getContext().getTypeSize(AlignAsType) > 128)
      return CharUnits::fromQuantity(32);

    return CharUnits::fromQuantity(16);
  } else if (AlignAsType) {
    return CharUnits::fromQuantity(AlignAsType->isVectorType() ? 16 : 8);
  }

  // Otherwise, we only need alignment for any aggregate type that
  // has an alignment requirement of >= 16 bytes.
  if (isAggregateTypeForABI(Ty) && getContext().getTypeAlign(Ty) >= 128) {
    if (HasQPX && getContext().getTypeAlign(Ty) >= 256)
      return CharUnits::fromQuantity(32);
    return CharUnits::fromQuantity(16);
  }

  return CharUnits::fromQuantity(8);
}

/// isHomogeneousAggregate - Return true if a type is an ELFv2 homogeneous
/// aggregate. Base is set to the base element type, and Members is set
/// to the number of base elements.
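///
/// E.g. (illustrative): "struct P { float x, y; };" yields Base == float
/// and Members == 2; "struct Q { struct P a, b; };" yields Members == 4;
/// adding an "int" field to either struct makes this return false.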
bool ABIInfo::isHomogeneousAggregate(QualType Ty, const Type *&Base,
                                     uint64_t &Members) const {
  if (const ConstantArrayType *AT = getContext().getAsConstantArrayType(Ty)) {
    uint64_t NElements = AT->getSize().getZExtValue();
    if (NElements == 0)
      return false;
    if (!isHomogeneousAggregate(AT->getElementType(), Base, Members))
      return false;
    Members *= NElements;
  } else if (const RecordType *RT = Ty->getAs<RecordType>()) {
    const RecordDecl *RD = RT->getDecl();
    if (RD->hasFlexibleArrayMember())
      return false;

    Members = 0;

    // If this is a C++ record, check the bases first.
    if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD)) {
      for (const auto &I : CXXRD->bases()) {
        // Ignore empty records.
        if (isEmptyRecord(getContext(), I.getType(), true))
          continue;

        uint64_t FldMembers;
        if (!isHomogeneousAggregate(I.getType(), Base, FldMembers))
          return false;

        Members += FldMembers;
      }
    }

    for (const auto *FD : RD->fields()) {
      // Ignore (non-zero arrays of) empty records.
      QualType FT = FD->getType();
      while (const ConstantArrayType *AT =
                 getContext().getAsConstantArrayType(FT)) {
        if (AT->getSize().getZExtValue() == 0)
          return false;
        FT = AT->getElementType();
      }
      if (isEmptyRecord(getContext(), FT, true))
        continue;

      // For compatibility with GCC, ignore empty bitfields in C++ mode.
      if (getContext().getLangOpts().CPlusPlus &&
          FD->isZeroLengthBitField(getContext()))
        continue;

      uint64_t FldMembers;
      if (!isHomogeneousAggregate(FD->getType(), Base, FldMembers))
        return false;

      Members = (RD->isUnion() ?
                 std::max(Members, FldMembers) : Members + FldMembers);
    }

    if (!Base)
      return false;

    // Ensure there is no padding.
    if (getContext().getTypeSize(Base) * Members !=
        getContext().getTypeSize(Ty))
      return false;
  } else {
    Members = 1;
    if (const ComplexType *CT = Ty->getAs<ComplexType>()) {
      Members = 2;
      Ty = CT->getElementType();
    }

    // Most ABIs only support float, double, and some vector type widths.
    if (!isHomogeneousAggregateBaseType(Ty))
      return false;

    // The base type must be the same for all members. Types that
    // agree in both total size and mode (float vs. vector) are
    // treated as being equivalent here.
    const Type *TyPtr = Ty.getTypePtr();
    if (!Base) {
      Base = TyPtr;
      // If it's a non-power-of-2 vector, its size has already been rounded
      // up to a power of 2, so widen the base type explicitly to match.
      if (const VectorType *VT = Base->getAs<VectorType>()) {
        QualType EltTy = VT->getElementType();
        unsigned NumElements =
            getContext().getTypeSize(VT) / getContext().getTypeSize(EltTy);
        Base = getContext()
                   .getVectorType(EltTy, NumElements, VT->getVectorKind())
                   .getTypePtr();
      }
    }

    if (Base->isVectorType() != TyPtr->isVectorType() ||
        getContext().getTypeSize(Base) != getContext().getTypeSize(TyPtr))
      return false;
  }
  return Members > 0 && isHomogeneousAggregateSmallEnough(Base, Members);
}

bool PPC64_SVR4_ABIInfo::isHomogeneousAggregateBaseType(QualType Ty) const {
  // Homogeneous aggregates for ELFv2 must have base types of float,
  // double, long double, or 128-bit vectors.
  if (const BuiltinType *BT = Ty->getAs<BuiltinType>()) {
    if (BT->getKind() == BuiltinType::Float ||
        BT->getKind() == BuiltinType::Double ||
        BT->getKind() == BuiltinType::LongDouble ||
        (getContext().getTargetInfo().hasFloat128Type() &&
         (BT->getKind() == BuiltinType::Float128))) {
      if (IsSoftFloatABI)
        return false;
      return true;
    }
  }
  if (const VectorType *VT = Ty->getAs<VectorType>()) {
    if (getContext().getTypeSize(VT) == 128 || IsQPXVectorTy(Ty))
      return true;
  }
  return false;
}

bool PPC64_SVR4_ABIInfo::isHomogeneousAggregateSmallEnough(
    const Type *Base, uint64_t Members) const {
  // Vector and fp128 types require one register, other floating point types
  // require one or two registers depending on their size.
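  //
  // E.g. (illustrative): "struct { double d[8]; }" needs 8 x 1 = 8 registers
  // and is accepted, while with a 128-bit long double
  // "struct { long double d[5]; }" needs 5 x 2 = 10 and is rejected.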
  uint32_t NumRegs =
      ((getContext().getTargetInfo().hasFloat128Type() &&
        Base->isFloat128Type()) ||
       Base->isVectorType()) ? 1
                             : (getContext().getTypeSize(Base) + 63) / 64;

  // Homogeneous Aggregates may occupy at most 8 registers.
  return Members * NumRegs <= 8;
}

ABIArgInfo
PPC64_SVR4_ABIInfo::classifyArgumentType(QualType Ty) const {
  Ty = useFirstFieldIfTransparentUnion(Ty);

  if (Ty->isAnyComplexType())
    return ABIArgInfo::getDirect();

  // Non-Altivec vector types are passed in GPRs (smaller than 16 bytes)
  // or via reference (larger than 16 bytes).
  if (Ty->isVectorType() && !IsQPXVectorTy(Ty)) {
    uint64_t Size = getContext().getTypeSize(Ty);
    if (Size > 128)
      return getNaturalAlignIndirect(Ty, /*ByVal=*/false);
    else if (Size < 128) {
      llvm::Type *CoerceTy = llvm::IntegerType::get(getVMContext(), Size);
      return ABIArgInfo::getDirect(CoerceTy);
    }
  }

  if (isAggregateTypeForABI(Ty)) {
    if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, getCXXABI()))
      return getNaturalAlignIndirect(Ty, RAA == CGCXXABI::RAA_DirectInMemory);

    uint64_t ABIAlign = getParamTypeAlignment(Ty).getQuantity();
    uint64_t TyAlign = getContext().getTypeAlignInChars(Ty).getQuantity();

    // ELFv2 homogeneous aggregates are passed as array types.
    const Type *Base = nullptr;
    uint64_t Members = 0;
    if (Kind == ELFv2 &&
        isHomogeneousAggregate(Ty, Base, Members)) {
      llvm::Type *BaseTy = CGT.ConvertType(QualType(Base, 0));
      llvm::Type *CoerceTy = llvm::ArrayType::get(BaseTy, Members);
      return ABIArgInfo::getDirect(CoerceTy);
    }

    // If an aggregate may end up fully in registers, we do not
    // use the ByVal method, but pass the aggregate as array.
    // This is usually beneficial since we avoid forcing the
    // back-end to store the argument to memory.
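    //
    // E.g. (a sketch of the coercions below): a 6-byte packed struct is
    // coerced to i48, a 24-byte struct with 8-byte ABI alignment to
    // [3 x i64], and a 72-byte struct (more than 8 doublewords) stays ByVal.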
    uint64_t Bits = getContext().getTypeSize(Ty);
    if (Bits > 0 && Bits <= 8 * GPRBits) {
      llvm::Type *CoerceTy;

      // Types up to 8 bytes are passed as integer type (which will be
      // properly aligned in the argument save area doubleword).
      if (Bits <= GPRBits)
        CoerceTy =
            llvm::IntegerType::get(getVMContext(), llvm::alignTo(Bits, 8));
      // Larger types are passed as arrays, with the base type selected
      // according to the required alignment in the save area.
      else {
        uint64_t RegBits = ABIAlign * 8;
        uint64_t NumRegs = llvm::alignTo(Bits, RegBits) / RegBits;
        llvm::Type *RegTy = llvm::IntegerType::get(getVMContext(), RegBits);
        CoerceTy = llvm::ArrayType::get(RegTy, NumRegs);
      }

      return ABIArgInfo::getDirect(CoerceTy);
    }

    // All other aggregates are passed ByVal.
    return ABIArgInfo::getIndirect(CharUnits::fromQuantity(ABIAlign),
                                   /*ByVal=*/true,
                                   /*Realign=*/TyAlign > ABIAlign);
  }

  return (isPromotableTypeForABI(Ty) ? ABIArgInfo::getExtend(Ty)
                                     : ABIArgInfo::getDirect());
}

ABIArgInfo
PPC64_SVR4_ABIInfo::classifyReturnType(QualType RetTy) const {
  if (RetTy->isVoidType())
    return ABIArgInfo::getIgnore();

  if (RetTy->isAnyComplexType())
    return ABIArgInfo::getDirect();

  // Non-Altivec vector types are returned in GPRs (smaller than 16 bytes)
  // or via reference (larger than 16 bytes).
  if (RetTy->isVectorType() && !IsQPXVectorTy(RetTy)) {
    uint64_t Size = getContext().getTypeSize(RetTy);
    if (Size > 128)
      return getNaturalAlignIndirect(RetTy);
    else if (Size < 128) {
      llvm::Type *CoerceTy = llvm::IntegerType::get(getVMContext(), Size);
      return ABIArgInfo::getDirect(CoerceTy);
    }
  }

  if (isAggregateTypeForABI(RetTy)) {
    // ELFv2 homogeneous aggregates are returned as array types.
    const Type *Base = nullptr;
    uint64_t Members = 0;
    if (Kind == ELFv2 &&
        isHomogeneousAggregate(RetTy, Base, Members)) {
      llvm::Type *BaseTy = CGT.ConvertType(QualType(Base, 0));
      llvm::Type *CoerceTy = llvm::ArrayType::get(BaseTy, Members);
      return ABIArgInfo::getDirect(CoerceTy);
    }

    // ELFv2 small aggregates are returned in up to two registers.
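    //
    // E.g. (illustrative): "struct { int a, b, c; }" is 96 bits, so under
    // ELFv2 it is returned as { i64, i64 }; "struct { char c; short s; }"
    // is 32 bits and is returned as a single i32.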
    uint64_t Bits = getContext().getTypeSize(RetTy);
    if (Kind == ELFv2 && Bits <= 2 * GPRBits) {
      if (Bits == 0)
        return ABIArgInfo::getIgnore();

      llvm::Type *CoerceTy;
      if (Bits > GPRBits) {
        CoerceTy = llvm::IntegerType::get(getVMContext(), GPRBits);
        CoerceTy = llvm::StructType::get(CoerceTy, CoerceTy);
      } else
        CoerceTy =
            llvm::IntegerType::get(getVMContext(), llvm::alignTo(Bits, 8));
      return ABIArgInfo::getDirect(CoerceTy);
    }

    // All other aggregates are returned indirectly.
    return getNaturalAlignIndirect(RetTy);
  }

  return (isPromotableTypeForABI(RetTy) ? ABIArgInfo::getExtend(RetTy)
                                        : ABIArgInfo::getDirect());
}

// Based on ARMABIInfo::EmitVAArg, adjusted for 64-bit machine.
Address PPC64_SVR4_ABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
                                      QualType Ty) const {
  auto TypeInfo = getContext().getTypeInfoInChars(Ty);
  TypeInfo.second = getParamTypeAlignment(Ty);

  CharUnits SlotSize = CharUnits::fromQuantity(8);

  // If we have a complex type and the base type is smaller than 8 bytes,
  // the ABI calls for the real and imaginary parts to be right-adjusted
  // in separate doublewords. However, Clang expects us to produce a
  // pointer to a structure with the two parts packed tightly. So generate
  // loads of the real and imaginary parts relative to the va_list pointer,
  // and store them to a temporary structure.
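  //
  // E.g. (illustrative, big-endian): for "_Complex float" the two 4-byte
  // parts sit at offsets 4 and 12 of the two consumed doublewords, and the
  // code below repacks them into a tightly packed 8-byte temporary.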
  if (const ComplexType *CTy = Ty->getAs<ComplexType>()) {
    CharUnits EltSize = TypeInfo.first / 2;
    if (EltSize < SlotSize) {
      Address Addr = emitVoidPtrDirectVAArg(CGF, VAListAddr, CGF.Int8Ty,
                                            SlotSize * 2, SlotSize,
                                            SlotSize, /*AllowHigher*/ true);

      Address RealAddr = Addr;
      Address ImagAddr = RealAddr;
      if (CGF.CGM.getDataLayout().isBigEndian()) {
        RealAddr = CGF.Builder.CreateConstInBoundsByteGEP(RealAddr,
                                                          SlotSize - EltSize);
        ImagAddr = CGF.Builder.CreateConstInBoundsByteGEP(ImagAddr,
                                                      2 * SlotSize - EltSize);
      } else {
        ImagAddr = CGF.Builder.CreateConstInBoundsByteGEP(RealAddr, SlotSize);
      }

      llvm::Type *EltTy = CGF.ConvertTypeForMem(CTy->getElementType());
      RealAddr = CGF.Builder.CreateElementBitCast(RealAddr, EltTy);
      ImagAddr = CGF.Builder.CreateElementBitCast(ImagAddr, EltTy);
      llvm::Value *Real = CGF.Builder.CreateLoad(RealAddr, ".vareal");
      llvm::Value *Imag = CGF.Builder.CreateLoad(ImagAddr, ".vaimag");

      Address Temp = CGF.CreateMemTemp(Ty, "vacplx");
      CGF.EmitStoreOfComplex({Real, Imag}, CGF.MakeAddrLValue(Temp, Ty),
                             /*init*/ true);
      return Temp;
    }
  }

  // Otherwise, just use the general rule.
  return emitVoidPtrVAArg(CGF, VAListAddr, Ty, /*Indirect*/ false,
                          TypeInfo, SlotSize, /*AllowHigher*/ true);
}

static bool
PPC64_initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
                              llvm::Value *Address) {
  // This is calculated from the LLVM and GCC tables and verified
  // against gcc output. AFAIK all ABIs use the same encoding.

  CodeGen::CGBuilderTy &Builder = CGF.Builder;

  llvm::IntegerType *i8 = CGF.Int8Ty;
  llvm::Value *Four8 = llvm::ConstantInt::get(i8, 4);
  llvm::Value *Eight8 = llvm::ConstantInt::get(i8, 8);
  llvm::Value *Sixteen8 = llvm::ConstantInt::get(i8, 16);

  // 0-31: r0-31, the 8-byte general-purpose registers
  AssignToArrayRange(Builder, Address, Eight8, 0, 31);

  // 32-63: fp0-31, the 8-byte floating-point registers
  AssignToArrayRange(Builder, Address, Eight8, 32, 63);

  // 64-67 are various 8-byte special-purpose registers:
  // 64: mq
  // 65: lr
  // 66: ctr
  // 67: ap
  AssignToArrayRange(Builder, Address, Eight8, 64, 67);

  // 68-76 are various 4-byte special-purpose registers:
  // 68-75: cr0-7
  // 76: xer
  AssignToArrayRange(Builder, Address, Four8, 68, 76);

  // 77-108: v0-31, the 16-byte vector registers
  AssignToArrayRange(Builder, Address, Sixteen8, 77, 108);

  // 109: vrsave
  // 110: vscr
  // 111: spe_acc
  // 112: spefscr
  // 113: sfp
  // 114: tfhar
  // 115: tfiar
  // 116: texasr
  AssignToArrayRange(Builder, Address, Eight8, 109, 116);

  return false;
}

bool
PPC64_SVR4_TargetCodeGenInfo::initDwarfEHRegSizeTable(
    CodeGen::CodeGenFunction &CGF,
    llvm::Value *Address) const {
  return PPC64_initDwarfEHRegSizeTable(CGF, Address);
}

bool
PPC64TargetCodeGenInfo::initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
                                                llvm::Value *Address) const {
  return PPC64_initDwarfEHRegSizeTable(CGF, Address);
}

//===----------------------------------------------------------------------===//
// AArch64 ABI Implementation
//===----------------------------------------------------------------------===//

namespace {

class AArch64ABIInfo : public SwiftABIInfo {
public:
  enum ABIKind {
    AAPCS = 0,
    DarwinPCS,
    Win64
  };

private:
  ABIKind Kind;

public:
  AArch64ABIInfo(CodeGenTypes &CGT, ABIKind Kind)
      : SwiftABIInfo(CGT), Kind(Kind) {}

private:
  ABIKind getABIKind() const { return Kind; }
  bool isDarwinPCS() const { return Kind == DarwinPCS; }

  ABIArgInfo classifyReturnType(QualType RetTy, bool IsVariadic) const;
  ABIArgInfo classifyArgumentType(QualType RetTy) const;
  bool isHomogeneousAggregateBaseType(QualType Ty) const override;
  bool isHomogeneousAggregateSmallEnough(const Type *Ty,
                                         uint64_t Members) const override;

  bool isIllegalVectorType(QualType Ty) const;

  void computeInfo(CGFunctionInfo &FI) const override {
    if (!::classifyReturnType(getCXXABI(), FI, *this))
      FI.getReturnInfo() =
          classifyReturnType(FI.getReturnType(), FI.isVariadic());

    for (auto &it : FI.arguments())
      it.info = classifyArgumentType(it.type);
  }

  Address EmitDarwinVAArg(Address VAListAddr, QualType Ty,
                          CodeGenFunction &CGF) const;

  Address EmitAAPCSVAArg(Address VAListAddr, QualType Ty,
                         CodeGenFunction &CGF) const;

  Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
                    QualType Ty) const override {
    return Kind == Win64 ? EmitMSVAArg(CGF, VAListAddr, Ty)
           : isDarwinPCS() ? EmitDarwinVAArg(VAListAddr, Ty, CGF)
                           : EmitAAPCSVAArg(VAListAddr, Ty, CGF);
  }

  Address EmitMSVAArg(CodeGenFunction &CGF, Address VAListAddr,
                      QualType Ty) const override;

  bool shouldPassIndirectlyForSwift(ArrayRef<llvm::Type *> scalars,
                                    bool asReturnValue) const override {
    return occupiesMoreThan(CGT, scalars, /*total*/ 4);
  }
  bool isSwiftErrorInRegister() const override {
    return true;
  }

  bool isLegalVectorTypeForSwift(CharUnits totalSize, llvm::Type *eltTy,
                                 unsigned elts) const override;
};

class AArch64TargetCodeGenInfo : public TargetCodeGenInfo {
public:
  AArch64TargetCodeGenInfo(CodeGenTypes &CGT, AArch64ABIInfo::ABIKind Kind)
      : TargetCodeGenInfo(new AArch64ABIInfo(CGT, Kind)) {}

  StringRef getARCRetainAutoreleasedReturnValueMarker() const override {
    return "mov\tfp, fp\t\t// marker for objc_retainAutoreleaseReturnValue";
  }

  int getDwarfEHStackPointer(CodeGen::CodeGenModule &M) const override {
    return 31;
  }

  bool doesReturnSlotInterfereWithArgs() const override { return false; }

  void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
                           CodeGen::CodeGenModule &CGM) const override {
    const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D);
    if (!FD)
      return;

    CodeGenOptions::SignReturnAddressScope Scope =
        CGM.getCodeGenOpts().getSignReturnAddress();
    CodeGenOptions::SignReturnAddressKeyValue Key =
        CGM.getCodeGenOpts().getSignReturnAddressKey();
    bool BranchTargetEnforcement = CGM.getCodeGenOpts().BranchTargetEnforcement;
    if (const auto *TA = FD->getAttr<TargetAttr>()) {
      ParsedTargetAttr Attr = TA->parse();
      if (!Attr.BranchProtection.empty()) {
        TargetInfo::BranchProtectionInfo BPI;
        StringRef Error;
        (void)CGM.getTarget().validateBranchProtection(Attr.BranchProtection,
                                                       BPI, Error);
        assert(Error.empty());
        Scope = BPI.SignReturnAddr;
        Key = BPI.SignKey;
        BranchTargetEnforcement = BPI.BranchTargetEnforcement;
      }
    }

    auto *Fn = cast<llvm::Function>(GV);
    if (Scope != CodeGenOptions::SignReturnAddressScope::None) {
      Fn->addFnAttr("sign-return-address",
                    Scope == CodeGenOptions::SignReturnAddressScope::All
                        ? "all"
                        : "non-leaf");

      Fn->addFnAttr("sign-return-address-key",
                    Key == CodeGenOptions::SignReturnAddressKeyValue::AKey
                        ? "a_key"
                        : "b_key");
    }

    if (BranchTargetEnforcement)
      Fn->addFnAttr("branch-target-enforcement");
  }
};

class WindowsAArch64TargetCodeGenInfo : public AArch64TargetCodeGenInfo {
public:
  WindowsAArch64TargetCodeGenInfo(CodeGenTypes &CGT, AArch64ABIInfo::ABIKind K)
      : AArch64TargetCodeGenInfo(CGT, K) {}

  void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
                           CodeGen::CodeGenModule &CGM) const override;

  void getDependentLibraryOption(llvm::StringRef Lib,
                                 llvm::SmallString<24> &Opt) const override {
    Opt = "/DEFAULTLIB:" + qualifyWindowsLibrary(Lib);
  }

  void getDetectMismatchOption(llvm::StringRef Name, llvm::StringRef Value,
                               llvm::SmallString<32> &Opt) const override {
    Opt = "/FAILIFMISMATCH:\"" + Name.str() + "=" + Value.str() + "\"";
  }
};

void WindowsAArch64TargetCodeGenInfo::setTargetAttributes(
    const Decl *D, llvm::GlobalValue *GV, CodeGen::CodeGenModule &CGM) const {
  AArch64TargetCodeGenInfo::setTargetAttributes(D, GV, CGM);
  if (GV->isDeclaration())
    return;
  addStackProbeTargetAttributes(D, GV, CGM);
}
} // namespace

ABIArgInfo AArch64ABIInfo::classifyArgumentType(QualType Ty) const {
  Ty = useFirstFieldIfTransparentUnion(Ty);

  // Handle illegal vector types here.
  if (isIllegalVectorType(Ty)) {
    uint64_t Size = getContext().getTypeSize(Ty);
    // Android promotes <2 x i8> to i16, not i32.
    if (isAndroid() && (Size <= 16)) {
      llvm::Type *ResType = llvm::Type::getInt16Ty(getVMContext());
      return ABIArgInfo::getDirect(ResType);
    }
    if (Size <= 32) {
      llvm::Type *ResType = llvm::Type::getInt32Ty(getVMContext());
      return ABIArgInfo::getDirect(ResType);
    }
    if (Size == 64) {
      llvm::Type *ResType =
          llvm::VectorType::get(llvm::Type::getInt32Ty(getVMContext()), 2);
      return ABIArgInfo::getDirect(ResType);
    }
    if (Size == 128) {
      llvm::Type *ResType =
          llvm::VectorType::get(llvm::Type::getInt32Ty(getVMContext()), 4);
      return ABIArgInfo::getDirect(ResType);
    }
    return getNaturalAlignIndirect(Ty, /*ByVal=*/false);
  }

  if (!isAggregateTypeForABI(Ty)) {
    // Treat an enum type as its underlying type.
    if (const EnumType *EnumTy = Ty->getAs<EnumType>())
      Ty = EnumTy->getDecl()->getIntegerType();

    return (Ty->isPromotableIntegerType() && isDarwinPCS()
                ? ABIArgInfo::getExtend(Ty)
                : ABIArgInfo::getDirect());
  }

  // Structures with either a non-trivial destructor or a non-trivial
  // copy constructor are always indirect.
  if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, getCXXABI())) {
    return getNaturalAlignIndirect(Ty, /*ByVal=*/RAA ==
                                           CGCXXABI::RAA_DirectInMemory);
  }

  // Empty records are always ignored on Darwin, but actually passed in C++
  // mode elsewhere for GNU compatibility.
  uint64_t Size = getContext().getTypeSize(Ty);
  bool IsEmpty = isEmptyRecord(getContext(), Ty, true);
  if (IsEmpty || Size == 0) {
    if (!getContext().getLangOpts().CPlusPlus || isDarwinPCS())
      return ABIArgInfo::getIgnore();

    // GNU C mode. The only argument that gets ignored is an empty one with
    // size 0.
    if (IsEmpty && Size == 0)
      return ABIArgInfo::getIgnore();
    return ABIArgInfo::getDirect(llvm::Type::getInt8Ty(getVMContext()));
  }

  // Homogeneous Floating-point Aggregates (HFAs) need to be expanded.
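  // E.g. (illustrative): "struct V3 { float x, y, z; };" is an HFA and is
  // passed as [3 x float], one element per SIMD/FP register.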
  const Type *Base = nullptr;
  uint64_t Members = 0;
  if (isHomogeneousAggregate(Ty, Base, Members)) {
    return ABIArgInfo::getDirect(
        llvm::ArrayType::get(CGT.ConvertType(QualType(Base, 0)), Members));
  }

  // Aggregates <= 16 bytes are passed directly in registers or on the stack.
  if (Size <= 128) {
    // On RenderScript, coerce Aggregates <= 16 bytes to an integer array of
    // same size and alignment.
    if (getTarget().isRenderScriptTarget()) {
      return coerceToIntArray(Ty, getContext(), getVMContext());
    }
    unsigned Alignment;
    if (Kind == AArch64ABIInfo::AAPCS) {
      Alignment = getContext().getTypeUnadjustedAlign(Ty);
      Alignment = Alignment < 128 ? 64 : 128;
    } else {
      Alignment = std::max(getContext().getTypeAlign(Ty),
                           (unsigned)getTarget().getPointerWidth(0));
    }
    Size = llvm::alignTo(Size, Alignment);

    // We use a pair of i64 for 16-byte aggregate with 8-byte alignment.
    // For aggregates with 16-byte alignment, we use i128.
    llvm::Type *BaseTy = llvm::Type::getIntNTy(getVMContext(), Alignment);
    return ABIArgInfo::getDirect(
        Size == Alignment ? BaseTy
                          : llvm::ArrayType::get(BaseTy, Size / Alignment));
  }

  return getNaturalAlignIndirect(Ty, /*ByVal=*/false);
}

ABIArgInfo AArch64ABIInfo::classifyReturnType(QualType RetTy,
                                              bool IsVariadic) const {
  if (RetTy->isVoidType())
    return ABIArgInfo::getIgnore();

  // Large vector types should be returned via memory.
  if (RetTy->isVectorType() && getContext().getTypeSize(RetTy) > 128)
    return getNaturalAlignIndirect(RetTy);

  if (!isAggregateTypeForABI(RetTy)) {
    // Treat an enum type as its underlying type.
    if (const EnumType *EnumTy = RetTy->getAs<EnumType>())
      RetTy = EnumTy->getDecl()->getIntegerType();

    return (RetTy->isPromotableIntegerType() && isDarwinPCS()
                ? ABIArgInfo::getExtend(RetTy)
                : ABIArgInfo::getDirect());
  }

  uint64_t Size = getContext().getTypeSize(RetTy);
  if (isEmptyRecord(getContext(), RetTy, true) || Size == 0)
    return ABIArgInfo::getIgnore();

  const Type *Base = nullptr;
  uint64_t Members = 0;
  if (isHomogeneousAggregate(RetTy, Base, Members) &&
      !(getTarget().getTriple().getArch() == llvm::Triple::aarch64_32 &&
        IsVariadic))
    // Homogeneous Floating-point Aggregates (HFAs) are returned directly.
    return ABIArgInfo::getDirect();

  // Aggregates <= 16 bytes are returned directly in registers or on the stack.
  if (Size <= 128) {
    // On RenderScript, coerce Aggregates <= 16 bytes to an integer array of
    // same size and alignment.
    if (getTarget().isRenderScriptTarget()) {
      return coerceToIntArray(RetTy, getContext(), getVMContext());
    }
    unsigned Alignment = getContext().getTypeAlign(RetTy);
    Size = llvm::alignTo(Size, 64); // round up to multiple of 8 bytes

    // We use a pair of i64 for 16-byte aggregate with 8-byte alignment.
    // For aggregates with 16-byte alignment, we use i128.
    if (Alignment < 128 && Size == 128) {
      llvm::Type *BaseTy = llvm::Type::getInt64Ty(getVMContext());
      return ABIArgInfo::getDirect(llvm::ArrayType::get(BaseTy, Size / 64));
    }
    return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(), Size));
  }

  return getNaturalAlignIndirect(RetTy);
}

/// isIllegalVectorType - check whether the vector type is legal for AArch64.
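///
/// E.g. (illustrative): <2 x float> (64 bits) and <4 x i32> (128 bits) are
/// legal; <3 x float> is not, because 3 is not a power of two, and
/// <2 x i16> is not, because 32 bits is neither 64 nor 128.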
bool AArch64ABIInfo::isIllegalVectorType(QualType Ty) const {
  if (const VectorType *VT = Ty->getAs<VectorType>()) {
    // Check whether VT is legal.
    unsigned NumElements = VT->getNumElements();
    uint64_t Size = getContext().getTypeSize(VT);
    // NumElements should be a power of 2.
    if (!llvm::isPowerOf2_32(NumElements))
      return true;

    // arm64_32 has to be compatible with the ARM logic here, which allows huge
    // vectors for some reason.
    llvm::Triple Triple = getTarget().getTriple();
    if (Triple.getArch() == llvm::Triple::aarch64_32 &&
        Triple.isOSBinFormatMachO())
      return Size <= 32;

    return Size != 64 && (Size != 128 || NumElements == 1);
  }
  return false;
}

bool AArch64ABIInfo::isLegalVectorTypeForSwift(CharUnits totalSize,
                                               llvm::Type *eltTy,
                                               unsigned elts) const {
  if (!llvm::isPowerOf2_32(elts))
    return false;
  if (totalSize.getQuantity() != 8 &&
      (totalSize.getQuantity() != 16 || elts == 1))
    return false;
  return true;
}

bool AArch64ABIInfo::isHomogeneousAggregateBaseType(QualType Ty) const {
  // Homogeneous aggregates for AAPCS64 must have base types of a floating
  // point type or a short-vector type. This is the same as the 32-bit ABI,
  // but with the difference that any floating-point type is allowed,
  // including __fp16.
  if (const BuiltinType *BT = Ty->getAs<BuiltinType>()) {
    if (BT->isFloatingPoint())
      return true;
  } else if (const VectorType *VT = Ty->getAs<VectorType>()) {
    unsigned VecSize = getContext().getTypeSize(VT);
    if (VecSize == 64 || VecSize == 128)
      return true;
  }
  return false;
}

bool AArch64ABIInfo::isHomogeneousAggregateSmallEnough(const Type *Base,
                                                       uint64_t Members) const {
  return Members <= 4;
}

Address AArch64ABIInfo::EmitAAPCSVAArg(Address VAListAddr,
                                       QualType Ty,
                                       CodeGenFunction &CGF) const {
  ABIArgInfo AI = classifyArgumentType(Ty);
  bool IsIndirect = AI.isIndirect();

  llvm::Type *BaseTy = CGF.ConvertType(Ty);
  if (IsIndirect)
    BaseTy = llvm::PointerType::getUnqual(BaseTy);
  else if (AI.getCoerceToType())
    BaseTy = AI.getCoerceToType();

  unsigned NumRegs = 1;
  if (llvm::ArrayType *ArrTy = dyn_cast<llvm::ArrayType>(BaseTy)) {
    BaseTy = ArrTy->getElementType();
    NumRegs = ArrTy->getNumElements();
  }
  bool IsFPR = BaseTy->isFloatingPointTy() || BaseTy->isVectorTy();

  // The AArch64 va_list type and handling is specified in the Procedure Call
  // Standard, section B.4:
  //
  // struct {
  //   void *__stack;
  //   void *__gr_top;
  //   void *__vr_top;
  //   int __gr_offs;
  //   int __vr_offs;
  // };
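  //
  // A brief orientation (paraphrasing the PCS, not quoting it): __gr_offs
  // and __vr_offs are negative byte offsets from __gr_top and __vr_top while
  // named registers remain, e.g. __gr_offs == -24 means three 8-byte GPR
  // slots are still available; once an offset reaches 0, arguments of that
  // class are taken from __stack.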

  llvm::BasicBlock *MaybeRegBlock = CGF.createBasicBlock("vaarg.maybe_reg");
  llvm::BasicBlock *InRegBlock = CGF.createBasicBlock("vaarg.in_reg");
  llvm::BasicBlock *OnStackBlock = CGF.createBasicBlock("vaarg.on_stack");
  llvm::BasicBlock *ContBlock = CGF.createBasicBlock("vaarg.end");

  CharUnits TySize = getContext().getTypeSizeInChars(Ty);
  CharUnits TyAlign = getContext().getTypeUnadjustedAlignInChars(Ty);

  Address reg_offs_p = Address::invalid();
  llvm::Value *reg_offs = nullptr;
  int reg_top_index;
  int RegSize = IsIndirect ? 8 : TySize.getQuantity();
  if (!IsFPR) {
    // 3 is the field number of __gr_offs.
    reg_offs_p = CGF.Builder.CreateStructGEP(VAListAddr, 3, "gr_offs_p");
    reg_offs = CGF.Builder.CreateLoad(reg_offs_p, "gr_offs");
    reg_top_index = 1; // field number for __gr_top
    RegSize = llvm::alignTo(RegSize, 8);
  } else {
    // 4 is the field number of __vr_offs.
    reg_offs_p = CGF.Builder.CreateStructGEP(VAListAddr, 4, "vr_offs_p");
    reg_offs = CGF.Builder.CreateLoad(reg_offs_p, "vr_offs");
    reg_top_index = 2; // field number for __vr_top
    RegSize = 16 * NumRegs;
  }

  //=======================================
  // Find out where argument was passed
  //=======================================

  // If reg_offs >= 0 we're already using the stack for this type of
  // argument. We don't want to keep updating reg_offs (in case it overflows,
  // though anyone passing 2GB of arguments, each at most 16 bytes, deserves
  // whatever they get).
  llvm::Value *UsingStack = nullptr;
  UsingStack = CGF.Builder.CreateICmpSGE(
      reg_offs, llvm::ConstantInt::get(CGF.Int32Ty, 0));

  CGF.Builder.CreateCondBr(UsingStack, OnStackBlock, MaybeRegBlock);

  // Otherwise, at least some kind of argument could go in these registers, the
  // question is whether this particular type is too big.
  CGF.EmitBlock(MaybeRegBlock);

  // Integer arguments may need to be realigned to an even register pair (for
  // example a "struct { __int128 a; };" gets passed in x_2N, x_{2N+1}). In
  // this case we align __gr_offs to calculate the potential address.
  if (!IsFPR && !IsIndirect && TyAlign.getQuantity() > 8) {
    int Align = TyAlign.getQuantity();

    reg_offs = CGF.Builder.CreateAdd(
        reg_offs, llvm::ConstantInt::get(CGF.Int32Ty, Align - 1),
        "align_regoffs");
    reg_offs = CGF.Builder.CreateAnd(
        reg_offs, llvm::ConstantInt::get(CGF.Int32Ty, -Align),
        "aligned_regoffs");
  }

  // Update the gr_offs/vr_offs pointer for next call to va_arg on this
  // va_list. The fact that this is done unconditionally reflects the fact
  // that allocating an argument to the stack also uses up all the remaining
  // registers of the appropriate kind.
  llvm::Value *NewOffset = nullptr;
  NewOffset = CGF.Builder.CreateAdd(
      reg_offs, llvm::ConstantInt::get(CGF.Int32Ty, RegSize), "new_reg_offs");
  CGF.Builder.CreateStore(NewOffset, reg_offs_p);

  // Now we're in a position to decide whether this argument really was in
  // registers or not.
  llvm::Value *InRegs = nullptr;
  InRegs = CGF.Builder.CreateICmpSLE(
      NewOffset, llvm::ConstantInt::get(CGF.Int32Ty, 0), "inreg");

  CGF.Builder.CreateCondBr(InRegs, InRegBlock, OnStackBlock);

  //=======================================
  // Argument was in registers
  //=======================================

  // Now we emit the code for if the argument was originally passed in
  // registers. First start the appropriate block:
  CGF.EmitBlock(InRegBlock);

  llvm::Value *reg_top = nullptr;
  Address reg_top_p =
      CGF.Builder.CreateStructGEP(VAListAddr, reg_top_index, "reg_top_p");
  reg_top = CGF.Builder.CreateLoad(reg_top_p, "reg_top");
  Address BaseAddr(CGF.Builder.CreateInBoundsGEP(reg_top, reg_offs),
                   CharUnits::fromQuantity(IsFPR ? 16 : 8));
  Address RegAddr = Address::invalid();
  llvm::Type *MemTy = CGF.ConvertTypeForMem(Ty);

  if (IsIndirect) {
    // If it's been passed indirectly (actually a struct), whatever we find
    // from stored registers or on the stack will actually be a struct **.
    MemTy = llvm::PointerType::getUnqual(MemTy);
  }

  const Type *Base = nullptr;
  uint64_t NumMembers = 0;
  bool IsHFA = isHomogeneousAggregate(Ty, Base, NumMembers);
  if (IsHFA && NumMembers > 1) {
    // Homogeneous aggregates passed in registers will have their elements
    // split and stored 16-bytes apart regardless of size (they're notionally
    // in qN, qN+1, ...). We reload and store into a temporary local variable
    // contiguously.
    assert(!IsIndirect && "Homogeneous aggregates should be passed directly");
    auto BaseTyInfo = getContext().getTypeInfoInChars(QualType(Base, 0));
    llvm::Type *BaseTy = CGF.ConvertType(QualType(Base, 0));
    llvm::Type *HFATy = llvm::ArrayType::get(BaseTy, NumMembers);
    Address Tmp = CGF.CreateTempAlloca(HFATy,
                                       std::max(TyAlign, BaseTyInfo.second));

    // On big-endian platforms, the value will be right-aligned in its slot.
    int Offset = 0;
    if (CGF.CGM.getDataLayout().isBigEndian() &&
        BaseTyInfo.first.getQuantity() < 16)
      Offset = 16 - BaseTyInfo.first.getQuantity();

    for (unsigned i = 0; i < NumMembers; ++i) {
      CharUnits BaseOffset = CharUnits::fromQuantity(16 * i + Offset);
      Address LoadAddr =
          CGF.Builder.CreateConstInBoundsByteGEP(BaseAddr, BaseOffset);
      LoadAddr = CGF.Builder.CreateElementBitCast(LoadAddr, BaseTy);

      Address StoreAddr = CGF.Builder.CreateConstArrayGEP(Tmp, i);

      llvm::Value *Elem = CGF.Builder.CreateLoad(LoadAddr);
      CGF.Builder.CreateStore(Elem, StoreAddr);
    }

    RegAddr = CGF.Builder.CreateElementBitCast(Tmp, MemTy);
  } else {
    // Otherwise the object is contiguous in memory.

    // It might be right-aligned in its slot.
    CharUnits SlotSize = BaseAddr.getAlignment();
    if (CGF.CGM.getDataLayout().isBigEndian() && !IsIndirect &&
        (IsHFA || !isAggregateTypeForABI(Ty)) &&
        TySize < SlotSize) {
      CharUnits Offset = SlotSize - TySize;
      BaseAddr = CGF.Builder.CreateConstInBoundsByteGEP(BaseAddr, Offset);
    }

    RegAddr = CGF.Builder.CreateElementBitCast(BaseAddr, MemTy);
  }

  CGF.EmitBranch(ContBlock);

  //=======================================
  // Argument was on the stack
  //=======================================
  CGF.EmitBlock(OnStackBlock);

  Address stack_p = CGF.Builder.CreateStructGEP(VAListAddr, 0, "stack_p");
  llvm::Value *OnStackPtr = CGF.Builder.CreateLoad(stack_p, "stack");

  // Again, stack arguments may need realignment. In this case both integer
  // and floating-point ones might be affected.
  if (!IsIndirect && TyAlign.getQuantity() > 8) {
    int Align = TyAlign.getQuantity();

    OnStackPtr = CGF.Builder.CreatePtrToInt(OnStackPtr, CGF.Int64Ty);

    OnStackPtr = CGF.Builder.CreateAdd(
        OnStackPtr, llvm::ConstantInt::get(CGF.Int64Ty, Align - 1),
        "align_stack");
    OnStackPtr = CGF.Builder.CreateAnd(
        OnStackPtr, llvm::ConstantInt::get(CGF.Int64Ty, -Align),
        "align_stack");

    OnStackPtr = CGF.Builder.CreateIntToPtr(OnStackPtr, CGF.Int8PtrTy);
  }
  Address OnStackAddr(OnStackPtr,
                      std::max(CharUnits::fromQuantity(8), TyAlign));

  // All stack slots are multiples of 8 bytes.
  CharUnits StackSlotSize = CharUnits::fromQuantity(8);
  CharUnits StackSize;
  if (IsIndirect)
    StackSize = StackSlotSize;
  else
    StackSize = TySize.alignTo(StackSlotSize);

  llvm::Value *StackSizeC = CGF.Builder.getSize(StackSize);
  llvm::Value *NewStack =
      CGF.Builder.CreateInBoundsGEP(OnStackPtr, StackSizeC, "new_stack");

  // Write the new value of __stack for the next call to va_arg.
  CGF.Builder.CreateStore(NewStack, stack_p);

  if (CGF.CGM.getDataLayout().isBigEndian() && !isAggregateTypeForABI(Ty) &&
      TySize < StackSlotSize) {
    CharUnits Offset = StackSlotSize - TySize;
    OnStackAddr = CGF.Builder.CreateConstInBoundsByteGEP(OnStackAddr, Offset);
  }

  OnStackAddr = CGF.Builder.CreateElementBitCast(OnStackAddr, MemTy);

  CGF.EmitBranch(ContBlock);

  //=======================================
  // Tidy up
  //=======================================
  CGF.EmitBlock(ContBlock);

  Address ResAddr = emitMergePHI(CGF, RegAddr, InRegBlock,
                                 OnStackAddr, OnStackBlock, "vaargs.addr");

  if (IsIndirect)
    return Address(CGF.Builder.CreateLoad(ResAddr, "vaarg.addr"),
                   TyAlign);

  return ResAddr;
}

Address AArch64ABIInfo::EmitDarwinVAArg(Address VAListAddr, QualType Ty,
                                        CodeGenFunction &CGF) const {
  // The backend's lowering doesn't support va_arg for aggregates or
  // illegal vector types. Lower VAArg here for these cases and use
  // the LLVM va_arg instruction for everything else.
  if (!isAggregateTypeForABI(Ty) && !isIllegalVectorType(Ty))
    return EmitVAArgInstr(CGF, VAListAddr, Ty, ABIArgInfo::getDirect());

  uint64_t PointerSize = getTarget().getPointerWidth(0) / 8;
  CharUnits SlotSize = CharUnits::fromQuantity(PointerSize);

  // Empty records are ignored for parameter passing purposes.
  if (isEmptyRecord(getContext(), Ty, true)) {
    Address Addr(CGF.Builder.CreateLoad(VAListAddr, "ap.cur"), SlotSize);
    Addr = CGF.Builder.CreateElementBitCast(Addr, CGF.ConvertTypeForMem(Ty));
    return Addr;
  }

  // The size of the actual thing passed, which might end up just
  // being a pointer for indirect types.
  auto TyInfo = getContext().getTypeInfoInChars(Ty);

  // Arguments bigger than 16 bytes which aren't homogeneous
  // aggregates should be passed indirectly.
  bool IsIndirect = false;
  if (TyInfo.first.getQuantity() > 16) {
    const Type *Base = nullptr;
    uint64_t Members = 0;
    IsIndirect = !isHomogeneousAggregate(Ty, Base, Members);
  }

  return emitVoidPtrVAArg(CGF, VAListAddr, Ty, IsIndirect,
                          TyInfo, SlotSize, /*AllowHigherAlign*/ true);
}

Address AArch64ABIInfo::EmitMSVAArg(CodeGenFunction &CGF, Address VAListAddr,
                                    QualType Ty) const {
  return emitVoidPtrVAArg(CGF, VAListAddr, Ty, /*indirect*/ false,
                          CGF.getContext().getTypeInfoInChars(Ty),
                          CharUnits::fromQuantity(8),
                          /*allowHigherAlign*/ false);
}
5611
5612 //===----------------------------------------------------------------------===//
5613 // ARM ABI Implementation
5614 //===----------------------------------------------------------------------===//
5615
5616 namespace {
5617
5618 class ARMABIInfo : public SwiftABIInfo {
5619 public:
5620 enum ABIKind {
5621 APCS = 0,
5622 AAPCS = 1,
5623 AAPCS_VFP = 2,
5624 AAPCS16_VFP = 3,
5625 };
5626
5627 private:
5628 ABIKind Kind;
5629
5630 public:
ARMABIInfo(CodeGenTypes & CGT,ABIKind _Kind)5631 ARMABIInfo(CodeGenTypes &CGT, ABIKind _Kind)
5632 : SwiftABIInfo(CGT), Kind(_Kind) {
5633 setCCs();
5634 }
5635
isEABI() const5636 bool isEABI() const {
5637 switch (getTarget().getTriple().getEnvironment()) {
5638 case llvm::Triple::Android:
5639 case llvm::Triple::EABI:
5640 case llvm::Triple::EABIHF:
5641 case llvm::Triple::GNUEABI:
5642 case llvm::Triple::GNUEABIHF:
5643 case llvm::Triple::MuslEABI:
5644 case llvm::Triple::MuslEABIHF:
5645 return true;
5646 default:
5647 return false;
5648 }
5649 }
5650
isEABIHF() const5651 bool isEABIHF() const {
5652 switch (getTarget().getTriple().getEnvironment()) {
5653 case llvm::Triple::EABIHF:
5654 case llvm::Triple::GNUEABIHF:
5655 case llvm::Triple::MuslEABIHF:
5656 return true;
5657 default:
5658 return false;
5659 }
5660 }
5661
getABIKind() const5662 ABIKind getABIKind() const { return Kind; }
5663
5664 private:
5665 ABIArgInfo classifyReturnType(QualType RetTy, bool isVariadic,
5666 unsigned functionCallConv) const;
5667 ABIArgInfo classifyArgumentType(QualType RetTy, bool isVariadic,
5668 unsigned functionCallConv) const;
5669 ABIArgInfo classifyHomogeneousAggregate(QualType Ty, const Type *Base,
5670 uint64_t Members) const;
5671 ABIArgInfo coerceIllegalVector(QualType Ty) const;
5672 bool isIllegalVectorType(QualType Ty) const;
5673 bool containsAnyFP16Vectors(QualType Ty) const;
5674
5675 bool isHomogeneousAggregateBaseType(QualType Ty) const override;
5676 bool isHomogeneousAggregateSmallEnough(const Type *Ty,
5677 uint64_t Members) const override;
5678
5679 bool isEffectivelyAAPCS_VFP(unsigned callConvention, bool acceptHalf) const;
5680
5681 void computeInfo(CGFunctionInfo &FI) const override;
5682
5683 Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
5684 QualType Ty) const override;
5685
5686 llvm::CallingConv::ID getLLVMDefaultCC() const;
5687 llvm::CallingConv::ID getABIDefaultCC() const;
5688 void setCCs();
5689
5690   bool shouldPassIndirectlyForSwift(ArrayRef<llvm::Type*> scalars,
5691 bool asReturnValue) const override {
5692 return occupiesMoreThan(CGT, scalars, /*total*/ 4);
5693 }
5694   bool isSwiftErrorInRegister() const override {
5695 return true;
5696 }
5697 bool isLegalVectorTypeForSwift(CharUnits totalSize, llvm::Type *eltTy,
5698 unsigned elts) const override;
5699 };
5700
5701 class ARMTargetCodeGenInfo : public TargetCodeGenInfo {
5702 public:
5703   ARMTargetCodeGenInfo(CodeGenTypes &CGT, ARMABIInfo::ABIKind K)
5704 :TargetCodeGenInfo(new ARMABIInfo(CGT, K)) {}
5705
5706   const ARMABIInfo &getABIInfo() const {
5707 return static_cast<const ARMABIInfo&>(TargetCodeGenInfo::getABIInfo());
5708 }
5709
5710   int getDwarfEHStackPointer(CodeGen::CodeGenModule &M) const override {
5711 return 13;
5712 }
5713
5714   StringRef getARCRetainAutoreleasedReturnValueMarker() const override {
5715 return "mov\tr7, r7\t\t// marker for objc_retainAutoreleaseReturnValue";
5716 }
5717
5718   bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
5719 llvm::Value *Address) const override {
5720 llvm::Value *Four8 = llvm::ConstantInt::get(CGF.Int8Ty, 4);
5721
5722 // 0-15 are the 16 integer registers.
5723 AssignToArrayRange(CGF.Builder, Address, Four8, 0, 15);
5724 return false;
5725 }
5726
5727   unsigned getSizeOfUnwindException() const override {
5728 if (getABIInfo().isEABI()) return 88;
5729 return TargetCodeGenInfo::getSizeOfUnwindException();
5730 }
5731
5732   void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
5733 CodeGen::CodeGenModule &CGM) const override {
5734 if (GV->isDeclaration())
5735 return;
5736 const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D);
5737 if (!FD)
5738 return;
5739
5740 const ARMInterruptAttr *Attr = FD->getAttr<ARMInterruptAttr>();
5741 if (!Attr)
5742 return;
5743
5744 const char *Kind;
5745 switch (Attr->getInterrupt()) {
5746 case ARMInterruptAttr::Generic: Kind = ""; break;
5747 case ARMInterruptAttr::IRQ: Kind = "IRQ"; break;
5748 case ARMInterruptAttr::FIQ: Kind = "FIQ"; break;
5749 case ARMInterruptAttr::SWI: Kind = "SWI"; break;
5750 case ARMInterruptAttr::ABORT: Kind = "ABORT"; break;
5751 case ARMInterruptAttr::UNDEF: Kind = "UNDEF"; break;
5752 }
5753
5754 llvm::Function *Fn = cast<llvm::Function>(GV);
5755
5756 Fn->addFnAttr("interrupt", Kind);
5757
5758 ARMABIInfo::ABIKind ABI = cast<ARMABIInfo>(getABIInfo()).getABIKind();
5759 if (ABI == ARMABIInfo::APCS)
5760 return;
5761
5762     // AAPCS guarantees that sp will be 8-byte aligned on any public interface;
5763     // however, this is not necessarily true when taking an interrupt. Instruct
5764     // the backend to perform a realignment as part of the function prologue.
5765 llvm::AttrBuilder B;
5766 B.addStackAlignmentAttr(8);
5767 Fn->addAttributes(llvm::AttributeList::FunctionIndex, B);
5768 }
5769 };
5770
5771 class WindowsARMTargetCodeGenInfo : public ARMTargetCodeGenInfo {
5772 public:
5773   WindowsARMTargetCodeGenInfo(CodeGenTypes &CGT, ARMABIInfo::ABIKind K)
5774 : ARMTargetCodeGenInfo(CGT, K) {}
5775
5776 void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
5777 CodeGen::CodeGenModule &CGM) const override;
5778
5779   void getDependentLibraryOption(llvm::StringRef Lib,
5780 llvm::SmallString<24> &Opt) const override {
5781 Opt = "/DEFAULTLIB:" + qualifyWindowsLibrary(Lib);
5782 }
5783
5784   void getDetectMismatchOption(llvm::StringRef Name, llvm::StringRef Value,
5785 llvm::SmallString<32> &Opt) const override {
5786 Opt = "/FAILIFMISMATCH:\"" + Name.str() + "=" + Value.str() + "\"";
5787 }
5788 };
5789
5790 void WindowsARMTargetCodeGenInfo::setTargetAttributes(
5791 const Decl *D, llvm::GlobalValue *GV, CodeGen::CodeGenModule &CGM) const {
5792 ARMTargetCodeGenInfo::setTargetAttributes(D, GV, CGM);
5793 if (GV->isDeclaration())
5794 return;
5795 addStackProbeTargetAttributes(D, GV, CGM);
5796 }
5797 }
5798
5799 void ARMABIInfo::computeInfo(CGFunctionInfo &FI) const {
5800 if (!::classifyReturnType(getCXXABI(), FI, *this))
5801 FI.getReturnInfo() = classifyReturnType(FI.getReturnType(), FI.isVariadic(),
5802 FI.getCallingConvention());
5803
5804 for (auto &I : FI.arguments())
5805 I.info = classifyArgumentType(I.type, FI.isVariadic(),
5806 FI.getCallingConvention());
5807
5809 // Always honor user-specified calling convention.
5810 if (FI.getCallingConvention() != llvm::CallingConv::C)
5811 return;
5812
5813 llvm::CallingConv::ID cc = getRuntimeCC();
5814 if (cc != llvm::CallingConv::C)
5815 FI.setEffectiveCallingConvention(cc);
5816 }
5817
5818 /// Return the default calling convention that LLVM will use.
5819 llvm::CallingConv::ID ARMABIInfo::getLLVMDefaultCC() const {
5820 // The default calling convention that LLVM will infer.
5821 if (isEABIHF() || getTarget().getTriple().isWatchABI())
5822 return llvm::CallingConv::ARM_AAPCS_VFP;
5823 else if (isEABI())
5824 return llvm::CallingConv::ARM_AAPCS;
5825 else
5826 return llvm::CallingConv::ARM_APCS;
5827 }
5828
5829 /// Return the calling convention that our ABI would like us to use
5830 /// as the C calling convention.
5831 llvm::CallingConv::ID ARMABIInfo::getABIDefaultCC() const {
5832 switch (getABIKind()) {
5833 case APCS: return llvm::CallingConv::ARM_APCS;
5834 case AAPCS: return llvm::CallingConv::ARM_AAPCS;
5835 case AAPCS_VFP: return llvm::CallingConv::ARM_AAPCS_VFP;
5836 case AAPCS16_VFP: return llvm::CallingConv::ARM_AAPCS_VFP;
5837 }
5838 llvm_unreachable("bad ABI kind");
5839 }
5840
5841 void ARMABIInfo::setCCs() {
5842 assert(getRuntimeCC() == llvm::CallingConv::C);
5843
5844 // Don't muddy up the IR with a ton of explicit annotations if
5845 // they'd just match what LLVM will infer from the triple.
5846 llvm::CallingConv::ID abiCC = getABIDefaultCC();
5847 if (abiCC != getLLVMDefaultCC())
5848 RuntimeCC = abiCC;
5849 }
5850
5851 ABIArgInfo ARMABIInfo::coerceIllegalVector(QualType Ty) const {
5852 uint64_t Size = getContext().getTypeSize(Ty);
5853 if (Size <= 32) {
5854 llvm::Type *ResType =
5855 llvm::Type::getInt32Ty(getVMContext());
5856 return ABIArgInfo::getDirect(ResType);
5857 }
5858 if (Size == 64 || Size == 128) {
5859 llvm::Type *ResType = llvm::VectorType::get(
5860 llvm::Type::getInt32Ty(getVMContext()), Size / 32);
5861 return ABIArgInfo::getDirect(ResType);
5862 }
5863 return getNaturalAlignIndirect(Ty, /*ByVal=*/false);
5864 }
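
// Illustrative examples of the coercions above (hypothetical vector types):
//   <2 x i8>     (16 bits)             -> coerced to i32
//   <3 x i16>    (padded to 64 bits)   -> coerced to <2 x i32>
//   <3 x double> (padded to 256 bits)  -> passed indirectly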
5865
5866 ABIArgInfo ARMABIInfo::classifyHomogeneousAggregate(QualType Ty,
5867 const Type *Base,
5868 uint64_t Members) const {
5869 assert(Base && "Base class should be set for homogeneous aggregate");
5870   // Base can be a floating-point type or a vector type.
5871 if (const VectorType *VT = Base->getAs<VectorType>()) {
5872 // FP16 vectors should be converted to integer vectors
5873 if (!getTarget().hasLegalHalfType() && containsAnyFP16Vectors(Ty)) {
5874 uint64_t Size = getContext().getTypeSize(VT);
5875 llvm::Type *NewVecTy = llvm::VectorType::get(
5876 llvm::Type::getInt32Ty(getVMContext()), Size / 32);
5877 llvm::Type *Ty = llvm::ArrayType::get(NewVecTy, Members);
5878 return ABIArgInfo::getDirect(Ty, 0, nullptr, false);
5879 }
5880 }
5881 return ABIArgInfo::getDirect(nullptr, 0, nullptr, false);
5882 }
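
// Illustrative example (an assumption, not from the original source): under
// AAPCS-VFP,
//   struct HFA { float x, y, z; };
// is a homogeneous aggregate with Base = float and Members = 3 and is passed
// directly, while a struct of two 64-bit __fp16 vectors on a target without
// native half support takes the path above and is coerced to
// [2 x <2 x i32>].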
5883
5884 ABIArgInfo ARMABIInfo::classifyArgumentType(QualType Ty, bool isVariadic,
5885 unsigned functionCallConv) const {
5886 // 6.1.2.1 The following argument types are VFP CPRCs:
5887 // A single-precision floating-point type (including promoted
5888 // half-precision types); A double-precision floating-point type;
5889 // A 64-bit or 128-bit containerized vector type; Homogeneous Aggregate
5890 // with a Base Type of a single- or double-precision floating-point type,
5891 // 64-bit containerized vectors or 128-bit containerized vectors with one
5892 // to four Elements.
5893 // Variadic functions should always marshal to the base standard.
5894 bool IsAAPCS_VFP =
5895 !isVariadic && isEffectivelyAAPCS_VFP(functionCallConv, /* AAPCS16 */ false);
5896
5897 Ty = useFirstFieldIfTransparentUnion(Ty);
5898
5899 // Handle illegal vector types here.
5900 if (isIllegalVectorType(Ty))
5901 return coerceIllegalVector(Ty);
5902
5903   // _Float16 and __fp16 get passed as if they were an int or float, but with
5904 // the top 16 bits unspecified. This is not done for OpenCL as it handles the
5905 // half type natively, and does not need to interwork with AAPCS code.
5906 if ((Ty->isFloat16Type() || Ty->isHalfType()) &&
5907 !getContext().getLangOpts().NativeHalfArgsAndReturns) {
5908 llvm::Type *ResType = IsAAPCS_VFP ?
5909 llvm::Type::getFloatTy(getVMContext()) :
5910 llvm::Type::getInt32Ty(getVMContext());
5911 return ABIArgInfo::getDirect(ResType);
5912 }
5913
5914 if (!isAggregateTypeForABI(Ty)) {
5915 // Treat an enum type as its underlying type.
5916 if (const EnumType *EnumTy = Ty->getAs<EnumType>()) {
5917 Ty = EnumTy->getDecl()->getIntegerType();
5918 }
5919
5920 return (Ty->isPromotableIntegerType() ? ABIArgInfo::getExtend(Ty)
5921 : ABIArgInfo::getDirect());
5922 }
5923
5924 if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, getCXXABI())) {
5925 return getNaturalAlignIndirect(Ty, RAA == CGCXXABI::RAA_DirectInMemory);
5926 }
5927
5928 // Ignore empty records.
5929 if (isEmptyRecord(getContext(), Ty, true))
5930 return ABIArgInfo::getIgnore();
5931
5932 if (IsAAPCS_VFP) {
5933 // Homogeneous Aggregates need to be expanded when we can fit the aggregate
5934 // into VFP registers.
5935 const Type *Base = nullptr;
5936 uint64_t Members = 0;
5937 if (isHomogeneousAggregate(Ty, Base, Members))
5938 return classifyHomogeneousAggregate(Ty, Base, Members);
5939 } else if (getABIKind() == ARMABIInfo::AAPCS16_VFP) {
5940 // WatchOS does have homogeneous aggregates. Note that we intentionally use
5941 // this convention even for a variadic function: the backend will use GPRs
5942 // if needed.
5943 const Type *Base = nullptr;
5944 uint64_t Members = 0;
5945 if (isHomogeneousAggregate(Ty, Base, Members)) {
5946 assert(Base && Members <= 4 && "unexpected homogeneous aggregate");
5947 llvm::Type *Ty =
5948 llvm::ArrayType::get(CGT.ConvertType(QualType(Base, 0)), Members);
5949 return ABIArgInfo::getDirect(Ty, 0, nullptr, false);
5950 }
5951 }
5952
5953 if (getABIKind() == ARMABIInfo::AAPCS16_VFP &&
5954 getContext().getTypeSizeInChars(Ty) > CharUnits::fromQuantity(16)) {
5955 // WatchOS is adopting the 64-bit AAPCS rule on composite types: if they're
5956 // bigger than 128-bits, they get placed in space allocated by the caller,
5957 // and a pointer is passed.
5958 return ABIArgInfo::getIndirect(
5959 CharUnits::fromQuantity(getContext().getTypeAlign(Ty) / 8), false);
5960 }
5961
5962 // Support byval for ARM.
5963   // The ABI alignment for APCS is 4 bytes; for AAPCS it is at least 4 bytes and
5964   // at most 8 bytes. We realign the indirect argument if the type alignment is
5965   // bigger than the ABI alignment.
5966 uint64_t ABIAlign = 4;
5967 uint64_t TyAlign;
5968 if (getABIKind() == ARMABIInfo::AAPCS_VFP ||
5969 getABIKind() == ARMABIInfo::AAPCS) {
5970 TyAlign = getContext().getTypeUnadjustedAlignInChars(Ty).getQuantity();
5971 ABIAlign = std::min(std::max(TyAlign, (uint64_t)4), (uint64_t)8);
5972 } else {
5973 TyAlign = getContext().getTypeAlignInChars(Ty).getQuantity();
5974 }
5975 if (getContext().getTypeSizeInChars(Ty) > CharUnits::fromQuantity(64)) {
5976 assert(getABIKind() != ARMABIInfo::AAPCS16_VFP && "unexpected byval");
5977 return ABIArgInfo::getIndirect(CharUnits::fromQuantity(ABIAlign),
5978 /*ByVal=*/true,
5979 /*Realign=*/TyAlign > ABIAlign);
5980 }
5981
5982 // On RenderScript, coerce Aggregates <= 64 bytes to an integer array of
5983 // same size and alignment.
5984 if (getTarget().isRenderScriptTarget()) {
5985 return coerceToIntArray(Ty, getContext(), getVMContext());
5986 }
5987
5988 // Otherwise, pass by coercing to a structure of the appropriate size.
5989 llvm::Type* ElemTy;
5990 unsigned SizeRegs;
5991 // FIXME: Try to match the types of the arguments more accurately where
5992 // we can.
5993 if (TyAlign <= 4) {
5994 ElemTy = llvm::Type::getInt32Ty(getVMContext());
5995 SizeRegs = (getContext().getTypeSize(Ty) + 31) / 32;
5996 } else {
5997 ElemTy = llvm::Type::getInt64Ty(getVMContext());
5998 SizeRegs = (getContext().getTypeSize(Ty) + 63) / 64;
5999 }
6000
6001 return ABIArgInfo::getDirect(llvm::ArrayType::get(ElemTy, SizeRegs));
6002 }
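
// Two illustrative coercions for the final case above (hypothetical types):
//   struct S1 { char c[6]; };             // align 1 (<= 4)       -> [2 x i32]
//   struct S2 { double d; long long l; }; // align 8 under AAPCS  -> [2 x i64]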
6003
6004 static bool isIntegerLikeType(QualType Ty, ASTContext &Context,
6005 llvm::LLVMContext &VMContext) {
6006 // APCS, C Language Calling Conventions, Non-Simple Return Values: A structure
6007 // is called integer-like if its size is less than or equal to one word, and
6008 // the offset of each of its addressable sub-fields is zero.
6009
6010 uint64_t Size = Context.getTypeSize(Ty);
6011
6012 // Check that the type fits in a word.
6013 if (Size > 32)
6014 return false;
6015
6016 // FIXME: Handle vector types!
6017 if (Ty->isVectorType())
6018 return false;
6019
6020 // Float types are never treated as "integer like".
6021 if (Ty->isRealFloatingType())
6022 return false;
6023
6024 // If this is a builtin or pointer type then it is ok.
6025 if (Ty->getAs<BuiltinType>() || Ty->isPointerType())
6026 return true;
6027
6028 // Small complex integer types are "integer like".
6029 if (const ComplexType *CT = Ty->getAs<ComplexType>())
6030 return isIntegerLikeType(CT->getElementType(), Context, VMContext);
6031
6032   // Single-element and zero-sized arrays should be allowed, by the definition
6033   // above, but they are not.
6034
6035 // Otherwise, it must be a record type.
6036 const RecordType *RT = Ty->getAs<RecordType>();
6037 if (!RT) return false;
6038
6039 // Ignore records with flexible arrays.
6040 const RecordDecl *RD = RT->getDecl();
6041 if (RD->hasFlexibleArrayMember())
6042 return false;
6043
6044 // Check that all sub-fields are at offset 0, and are themselves "integer
6045 // like".
6046 const ASTRecordLayout &Layout = Context.getASTRecordLayout(RD);
6047
6048 bool HadField = false;
6049 unsigned idx = 0;
6050 for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
6051 i != e; ++i, ++idx) {
6052 const FieldDecl *FD = *i;
6053
6054     // Bit-fields are not addressable, so we only need to verify they are
6055     // "integer like". We still have to disallow a subsequent non-bitfield; for
6056     // example, struct { int : 0; int x; }
6057     // is not integer-like according to gcc.
6058 if (FD->isBitField()) {
6059 if (!RD->isUnion())
6060 HadField = true;
6061
6062 if (!isIntegerLikeType(FD->getType(), Context, VMContext))
6063 return false;
6064
6065 continue;
6066 }
6067
6068 // Check if this field is at offset 0.
6069 if (Layout.getFieldOffset(idx) != 0)
6070 return false;
6071
6072 if (!isIntegerLikeType(FD->getType(), Context, VMContext))
6073 return false;
6074
6075 // Only allow at most one field in a structure. This doesn't match the
6076 // wording above, but follows gcc in situations with a field following an
6077 // empty structure.
6078 if (!RD->isUnion()) {
6079 if (HadField)
6080 return false;
6081
6082 HadField = true;
6083 }
6084 }
6085
6086 return true;
6087 }
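
// Illustrative examples of the definition above (hypothetical types):
//   struct A { short s; };      // integer-like: single field at offset 0
//   struct B { char a, b; };    // not integer-like: 'b' is at offset 8 bits
//   union U { int i; char c; }; // integer-like: all members are at offset 0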
6088
6089 ABIArgInfo ARMABIInfo::classifyReturnType(QualType RetTy, bool isVariadic,
6090 unsigned functionCallConv) const {
6091
6092 // Variadic functions should always marshal to the base standard.
6093 bool IsAAPCS_VFP =
6094 !isVariadic && isEffectivelyAAPCS_VFP(functionCallConv, /* AAPCS16 */ true);
6095
6096 if (RetTy->isVoidType())
6097 return ABIArgInfo::getIgnore();
6098
6099 if (const VectorType *VT = RetTy->getAs<VectorType>()) {
6100 // Large vector types should be returned via memory.
6101 if (getContext().getTypeSize(RetTy) > 128)
6102 return getNaturalAlignIndirect(RetTy);
6103 // FP16 vectors should be converted to integer vectors
6104 if (!getTarget().hasLegalHalfType() &&
6105 (VT->getElementType()->isFloat16Type() ||
6106 VT->getElementType()->isHalfType()))
6107 return coerceIllegalVector(RetTy);
6108 }
6109
6110   // _Float16 and __fp16 get returned as if they were an int or float, but with
6111 // the top 16 bits unspecified. This is not done for OpenCL as it handles the
6112 // half type natively, and does not need to interwork with AAPCS code.
6113 if ((RetTy->isFloat16Type() || RetTy->isHalfType()) &&
6114 !getContext().getLangOpts().NativeHalfArgsAndReturns) {
6115 llvm::Type *ResType = IsAAPCS_VFP ?
6116 llvm::Type::getFloatTy(getVMContext()) :
6117 llvm::Type::getInt32Ty(getVMContext());
6118 return ABIArgInfo::getDirect(ResType);
6119 }
6120
6121 if (!isAggregateTypeForABI(RetTy)) {
6122 // Treat an enum type as its underlying type.
6123 if (const EnumType *EnumTy = RetTy->getAs<EnumType>())
6124 RetTy = EnumTy->getDecl()->getIntegerType();
6125
6126 return RetTy->isPromotableIntegerType() ? ABIArgInfo::getExtend(RetTy)
6127 : ABIArgInfo::getDirect();
6128 }
6129
6130 // Are we following APCS?
6131 if (getABIKind() == APCS) {
6132 if (isEmptyRecord(getContext(), RetTy, false))
6133 return ABIArgInfo::getIgnore();
6134
6135 // Complex types are all returned as packed integers.
6136 //
6137 // FIXME: Consider using 2 x vector types if the back end handles them
6138 // correctly.
6139 if (RetTy->isAnyComplexType())
6140 return ABIArgInfo::getDirect(llvm::IntegerType::get(
6141 getVMContext(), getContext().getTypeSize(RetTy)));
6142
6143 // Integer like structures are returned in r0.
6144 if (isIntegerLikeType(RetTy, getContext(), getVMContext())) {
6145 // Return in the smallest viable integer type.
6146 uint64_t Size = getContext().getTypeSize(RetTy);
6147 if (Size <= 8)
6148 return ABIArgInfo::getDirect(llvm::Type::getInt8Ty(getVMContext()));
6149 if (Size <= 16)
6150 return ABIArgInfo::getDirect(llvm::Type::getInt16Ty(getVMContext()));
6151 return ABIArgInfo::getDirect(llvm::Type::getInt32Ty(getVMContext()));
6152 }
6153
6154 // Otherwise return in memory.
6155 return getNaturalAlignIndirect(RetTy);
6156 }
6157
6158 // Otherwise this is an AAPCS variant.
6159
6160 if (isEmptyRecord(getContext(), RetTy, true))
6161 return ABIArgInfo::getIgnore();
6162
6163 // Check for homogeneous aggregates with AAPCS-VFP.
6164 if (IsAAPCS_VFP) {
6165 const Type *Base = nullptr;
6166 uint64_t Members = 0;
6167 if (isHomogeneousAggregate(RetTy, Base, Members))
6168 return classifyHomogeneousAggregate(RetTy, Base, Members);
6169 }
6170
6171 // Aggregates <= 4 bytes are returned in r0; other aggregates
6172 // are returned indirectly.
6173 uint64_t Size = getContext().getTypeSize(RetTy);
6174 if (Size <= 32) {
6175 // On RenderScript, coerce Aggregates <= 4 bytes to an integer array of
6176 // same size and alignment.
6177 if (getTarget().isRenderScriptTarget()) {
6178 return coerceToIntArray(RetTy, getContext(), getVMContext());
6179 }
6180 if (getDataLayout().isBigEndian())
6181       // Return in a 32-bit integer type (as if loaded by LDR, AAPCS 5.4).
6182 return ABIArgInfo::getDirect(llvm::Type::getInt32Ty(getVMContext()));
6183
6184 // Return in the smallest viable integer type.
6185 if (Size <= 8)
6186 return ABIArgInfo::getDirect(llvm::Type::getInt8Ty(getVMContext()));
6187 if (Size <= 16)
6188 return ABIArgInfo::getDirect(llvm::Type::getInt16Ty(getVMContext()));
6189 return ABIArgInfo::getDirect(llvm::Type::getInt32Ty(getVMContext()));
6190 } else if (Size <= 128 && getABIKind() == AAPCS16_VFP) {
6191 llvm::Type *Int32Ty = llvm::Type::getInt32Ty(getVMContext());
6192 llvm::Type *CoerceTy =
6193 llvm::ArrayType::get(Int32Ty, llvm::alignTo(Size, 32) / 32);
6194 return ABIArgInfo::getDirect(CoerceTy);
6195 }
6196
6197 return getNaturalAlignIndirect(RetTy);
6198 }
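
// Illustrative example (an assumption, not from the original source): under
// AAPCS a one-byte struct such as
//   struct R { char c; };
// is returned directly as i8 on little-endian targets but as i32 on
// big-endian targets (as if loaded by LDR), while a 12-byte non-homogeneous
// struct exceeds 32 bits and is returned indirectly.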
6199
6200 /// isIllegalVectorType - check whether Ty is an illegal vector type.
6201 bool ARMABIInfo::isIllegalVectorType(QualType Ty) const {
6202   if (const VectorType *VT = Ty->getAs<VectorType>()) {
6203 // On targets that don't support FP16, FP16 is expanded into float, and we
6204 // don't want the ABI to depend on whether or not FP16 is supported in
6205 // hardware. Thus return false to coerce FP16 vectors into integer vectors.
6206 if (!getTarget().hasLegalHalfType() &&
6207 (VT->getElementType()->isFloat16Type() ||
6208 VT->getElementType()->isHalfType()))
6209 return true;
6210 if (isAndroid()) {
6211 // Android shipped using Clang 3.1, which supported a slightly different
6212 // vector ABI. The primary differences were that 3-element vector types
6213       // were legal, and so were sub-32-bit vectors (e.g. <2 x i8>). This path
6214 // accepts that legacy behavior for Android only.
6215 // Check whether VT is legal.
6216 unsigned NumElements = VT->getNumElements();
6217 // NumElements should be power of 2 or equal to 3.
6218 if (!llvm::isPowerOf2_32(NumElements) && NumElements != 3)
6219 return true;
6220 } else {
6221 // Check whether VT is legal.
6222 unsigned NumElements = VT->getNumElements();
6223 uint64_t Size = getContext().getTypeSize(VT);
6224 // NumElements should be power of 2.
6225 if (!llvm::isPowerOf2_32(NumElements))
6226 return true;
6227       // Vectors of 32 bits or fewer are illegal: legal sizes exceed 32 bits.
6228       return Size <= 32;
6229 }
6230 }
6231 return false;
6232 }
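
// Illustrative examples (hypothetical vector types):
//   <3 x i32>: legal on Android (exactly 3 elements), illegal elsewhere
//   (3 is not a power of 2).
//   <2 x i8> (16 bits): legal on Android, illegal elsewhere (<= 32 bits).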
6233
6234 /// Return true if a type contains any 16-bit floating-point vectors.
6235 bool ARMABIInfo::containsAnyFP16Vectors(QualType Ty) const {
6236 if (const ConstantArrayType *AT = getContext().getAsConstantArrayType(Ty)) {
6237 uint64_t NElements = AT->getSize().getZExtValue();
6238 if (NElements == 0)
6239 return false;
6240 return containsAnyFP16Vectors(AT->getElementType());
6241 } else if (const RecordType *RT = Ty->getAs<RecordType>()) {
6242 const RecordDecl *RD = RT->getDecl();
6243
6244 // If this is a C++ record, check the bases first.
6245 if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD))
6246 if (llvm::any_of(CXXRD->bases(), [this](const CXXBaseSpecifier &B) {
6247 return containsAnyFP16Vectors(B.getType());
6248 }))
6249 return true;
6250
6251 if (llvm::any_of(RD->fields(), [this](FieldDecl *FD) {
6252 return FD && containsAnyFP16Vectors(FD->getType());
6253 }))
6254 return true;
6255
6256 return false;
6257 } else {
6258 if (const VectorType *VT = Ty->getAs<VectorType>())
6259 return (VT->getElementType()->isFloat16Type() ||
6260 VT->getElementType()->isHalfType());
6261 return false;
6262 }
6263 }
6264
6265 bool ARMABIInfo::isLegalVectorTypeForSwift(CharUnits vectorSize,
6266 llvm::Type *eltTy,
6267 unsigned numElts) const {
6268 if (!llvm::isPowerOf2_32(numElts))
6269 return false;
6270 unsigned size = getDataLayout().getTypeStoreSizeInBits(eltTy);
6271 if (size > 64)
6272 return false;
6273 if (vectorSize.getQuantity() != 8 &&
6274 (vectorSize.getQuantity() != 16 || numElts == 1))
6275 return false;
6276 return true;
6277 }
6278
6279 bool ARMABIInfo::isHomogeneousAggregateBaseType(QualType Ty) const {
6280 // Homogeneous aggregates for AAPCS-VFP must have base types of float,
6281 // double, or 64-bit or 128-bit vectors.
6282 if (const BuiltinType *BT = Ty->getAs<BuiltinType>()) {
6283 if (BT->getKind() == BuiltinType::Float ||
6284 BT->getKind() == BuiltinType::Double ||
6285 BT->getKind() == BuiltinType::LongDouble)
6286 return true;
6287 } else if (const VectorType *VT = Ty->getAs<VectorType>()) {
6288 unsigned VecSize = getContext().getTypeSize(VT);
6289 if (VecSize == 64 || VecSize == 128)
6290 return true;
6291 }
6292 return false;
6293 }
6294
6295 bool ARMABIInfo::isHomogeneousAggregateSmallEnough(const Type *Base,
6296 uint64_t Members) const {
6297 return Members <= 4;
6298 }
6299
6300 bool ARMABIInfo::isEffectivelyAAPCS_VFP(unsigned callConvention,
6301 bool acceptHalf) const {
6302 // Give precedence to user-specified calling conventions.
6303 if (callConvention != llvm::CallingConv::C)
6304 return (callConvention == llvm::CallingConv::ARM_AAPCS_VFP);
6305 else
6306 return (getABIKind() == AAPCS_VFP) ||
6307 (acceptHalf && (getABIKind() == AAPCS16_VFP));
6308 }
6309
6310 Address ARMABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
6311 QualType Ty) const {
6312 CharUnits SlotSize = CharUnits::fromQuantity(4);
6313
6314 // Empty records are ignored for parameter passing purposes.
6315 if (isEmptyRecord(getContext(), Ty, true)) {
6316 Address Addr(CGF.Builder.CreateLoad(VAListAddr), SlotSize);
6317 Addr = CGF.Builder.CreateElementBitCast(Addr, CGF.ConvertTypeForMem(Ty));
6318 return Addr;
6319 }
6320
6321 CharUnits TySize = getContext().getTypeSizeInChars(Ty);
6322 CharUnits TyAlignForABI = getContext().getTypeUnadjustedAlignInChars(Ty);
6323
6324 // Use indirect if size of the illegal vector is bigger than 16 bytes.
6325 bool IsIndirect = false;
6326 const Type *Base = nullptr;
6327 uint64_t Members = 0;
6328 if (TySize > CharUnits::fromQuantity(16) && isIllegalVectorType(Ty)) {
6329 IsIndirect = true;
6330
6331 // ARMv7k passes structs bigger than 16 bytes indirectly, in space
6332 // allocated by the caller.
6333 } else if (TySize > CharUnits::fromQuantity(16) &&
6334 getABIKind() == ARMABIInfo::AAPCS16_VFP &&
6335 !isHomogeneousAggregate(Ty, Base, Members)) {
6336 IsIndirect = true;
6337
6338 // Otherwise, bound the type's ABI alignment.
6339 // The ABI alignment for 64-bit or 128-bit vectors is 8 for AAPCS and 4 for
6340   // APCS. For AAPCS, the ABI alignment is at least 4 bytes and at most 8 bytes.
6341 // Our callers should be prepared to handle an under-aligned address.
6342 } else if (getABIKind() == ARMABIInfo::AAPCS_VFP ||
6343 getABIKind() == ARMABIInfo::AAPCS) {
6344 TyAlignForABI = std::max(TyAlignForABI, CharUnits::fromQuantity(4));
6345 TyAlignForABI = std::min(TyAlignForABI, CharUnits::fromQuantity(8));
6346 } else if (getABIKind() == ARMABIInfo::AAPCS16_VFP) {
6347 // ARMv7k allows type alignment up to 16 bytes.
6348 TyAlignForABI = std::max(TyAlignForABI, CharUnits::fromQuantity(4));
6349 TyAlignForABI = std::min(TyAlignForABI, CharUnits::fromQuantity(16));
6350 } else {
6351 TyAlignForABI = CharUnits::fromQuantity(4);
6352 }
6353
6354 std::pair<CharUnits, CharUnits> TyInfo = { TySize, TyAlignForABI };
6355 return emitVoidPtrVAArg(CGF, VAListAddr, Ty, IsIndirect, TyInfo,
6356 SlotSize, /*AllowHigherAlign*/ true);
6357 }
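
// Illustrative example (an assumption about a hypothetical type): reading
//   struct __attribute__((aligned(16))) S { int x; };
// through va_arg under AAPCS clamps TyAlignForABI to 8 bytes, so ap is only
// rounded up to an 8-byte boundary and the resulting address may be
// under-aligned for the type.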
6358
6359 //===----------------------------------------------------------------------===//
6360 // NVPTX ABI Implementation
6361 //===----------------------------------------------------------------------===//
6362
6363 namespace {
6364
6365 class NVPTXABIInfo : public ABIInfo {
6366 public:
6367   NVPTXABIInfo(CodeGenTypes &CGT) : ABIInfo(CGT) {}
6368
6369 ABIArgInfo classifyReturnType(QualType RetTy) const;
6370 ABIArgInfo classifyArgumentType(QualType Ty) const;
6371
6372 void computeInfo(CGFunctionInfo &FI) const override;
6373 Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
6374 QualType Ty) const override;
6375 };
6376
6377 class NVPTXTargetCodeGenInfo : public TargetCodeGenInfo {
6378 public:
6379   NVPTXTargetCodeGenInfo(CodeGenTypes &CGT)
6380 : TargetCodeGenInfo(new NVPTXABIInfo(CGT)) {}
6381
6382 void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
6383 CodeGen::CodeGenModule &M) const override;
6384 bool shouldEmitStaticExternCAliases() const override;
6385
6386 private:
6387 // Adds a NamedMDNode with F, Name, and Operand as operands, and adds the
6388 // resulting MDNode to the nvvm.annotations MDNode.
6389 static void addNVVMMetadata(llvm::Function *F, StringRef Name, int Operand);
6390 };
6391
6392 /// Checks if the type is unsupported directly by the current target.
6393 static bool isUnsupportedType(ASTContext &Context, QualType T) {
6394 if (!Context.getTargetInfo().hasFloat16Type() && T->isFloat16Type())
6395 return true;
6396 if (!Context.getTargetInfo().hasFloat128Type() &&
6397 (T->isFloat128Type() ||
6398 (T->isRealFloatingType() && Context.getTypeSize(T) == 128)))
6399 return true;
6400 if (!Context.getTargetInfo().hasInt128Type() && T->isIntegerType() &&
6401 Context.getTypeSize(T) > 64)
6402 return true;
6403 if (const auto *AT = T->getAsArrayTypeUnsafe())
6404 return isUnsupportedType(Context, AT->getElementType());
6405 const auto *RT = T->getAs<RecordType>();
6406 if (!RT)
6407 return false;
6408 const RecordDecl *RD = RT->getDecl();
6409
6410 // If this is a C++ record, check the bases first.
6411 if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD))
6412 for (const CXXBaseSpecifier &I : CXXRD->bases())
6413 if (isUnsupportedType(Context, I.getType()))
6414 return true;
6415
6416 for (const FieldDecl *I : RD->fields())
6417 if (isUnsupportedType(Context, I->getType()))
6418 return true;
6419 return false;
6420 }
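
// Illustrative example (an assumption): when compiling OpenMP device code
// for a target without native 128-bit integers, a __int128 return value is
// flagged here and then coerced by coerceToIntArrayWithLimit() below.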
6421
6422 /// Coerce the given type into an array with maximum allowed size of elements.
6423 static ABIArgInfo coerceToIntArrayWithLimit(QualType Ty, ASTContext &Context,
6424 llvm::LLVMContext &LLVMContext,
6425 unsigned MaxSize) {
6426 // Alignment and Size are measured in bits.
6427 const uint64_t Size = Context.getTypeSize(Ty);
6428 const uint64_t Alignment = Context.getTypeAlign(Ty);
6429 const unsigned Div = std::min<unsigned>(MaxSize, Alignment);
6430 llvm::Type *IntType = llvm::Type::getIntNTy(LLVMContext, Div);
6431 const uint64_t NumElements = (Size + Div - 1) / Div;
6432 return ABIArgInfo::getDirect(llvm::ArrayType::get(IntType, NumElements));
6433 }
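
// Illustrative example: a 128-bit type with 128-bit alignment and
// MaxSize = 64 gives Div = 64, so the type is coerced to [2 x i64].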
6434
6435 ABIArgInfo NVPTXABIInfo::classifyReturnType(QualType RetTy) const {
6436 if (RetTy->isVoidType())
6437 return ABIArgInfo::getIgnore();
6438
6439 if (getContext().getLangOpts().OpenMP &&
6440 getContext().getLangOpts().OpenMPIsDevice &&
6441 isUnsupportedType(getContext(), RetTy))
6442 return coerceToIntArrayWithLimit(RetTy, getContext(), getVMContext(), 64);
6443
6444   // Note: this differs from the default ABI.
6445 if (!RetTy->isScalarType())
6446 return ABIArgInfo::getDirect();
6447
6448 // Treat an enum type as its underlying type.
6449 if (const EnumType *EnumTy = RetTy->getAs<EnumType>())
6450 RetTy = EnumTy->getDecl()->getIntegerType();
6451
6452 return (RetTy->isPromotableIntegerType() ? ABIArgInfo::getExtend(RetTy)
6453 : ABIArgInfo::getDirect());
6454 }
6455
6456 ABIArgInfo NVPTXABIInfo::classifyArgumentType(QualType Ty) const {
6457 // Treat an enum type as its underlying type.
6458 if (const EnumType *EnumTy = Ty->getAs<EnumType>())
6459 Ty = EnumTy->getDecl()->getIntegerType();
6460
6461   // Pass aggregate types indirectly, using the byval attribute.
6462 if (isAggregateTypeForABI(Ty))
6463 return getNaturalAlignIndirect(Ty, /* byval */ true);
6464
6465 return (Ty->isPromotableIntegerType() ? ABIArgInfo::getExtend(Ty)
6466 : ABIArgInfo::getDirect());
6467 }
6468
6469 void NVPTXABIInfo::computeInfo(CGFunctionInfo &FI) const {
6470 if (!getCXXABI().classifyReturnType(FI))
6471 FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
6472 for (auto &I : FI.arguments())
6473 I.info = classifyArgumentType(I.type);
6474
6475 // Always honor user-specified calling convention.
6476 if (FI.getCallingConvention() != llvm::CallingConv::C)
6477 return;
6478
6479 FI.setEffectiveCallingConvention(getRuntimeCC());
6480 }
6481
6482 Address NVPTXABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
6483 QualType Ty) const {
6484 llvm_unreachable("NVPTX does not support varargs");
6485 }
6486
6487 void NVPTXTargetCodeGenInfo::setTargetAttributes(
6488 const Decl *D, llvm::GlobalValue *GV, CodeGen::CodeGenModule &M) const {
6489 if (GV->isDeclaration())
6490 return;
6491 const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D);
6492 if (!FD) return;
6493
6494 llvm::Function *F = cast<llvm::Function>(GV);
6495
6496 // Perform special handling in OpenCL mode
6497 if (M.getLangOpts().OpenCL) {
6498 // Use OpenCL function attributes to check for kernel functions
6499 // By default, all functions are device functions
6500 if (FD->hasAttr<OpenCLKernelAttr>()) {
6501 // OpenCL __kernel functions get kernel metadata
6502 // Create !{<func-ref>, metadata !"kernel", i32 1} node
6503 addNVVMMetadata(F, "kernel", 1);
6504 // And kernel functions are not subject to inlining
6505 F->addFnAttr(llvm::Attribute::NoInline);
6506 }
6507 }
6508
6509 // Perform special handling in CUDA mode.
6510 if (M.getLangOpts().CUDA) {
6511 // CUDA __global__ functions get a kernel metadata entry. Since
6512 // __global__ functions cannot be called from the device, we do not
6513 // need to set the noinline attribute.
6514 if (FD->hasAttr<CUDAGlobalAttr>()) {
6515 // Create !{<func-ref>, metadata !"kernel", i32 1} node
6516 addNVVMMetadata(F, "kernel", 1);
6517 }
6518 if (CUDALaunchBoundsAttr *Attr = FD->getAttr<CUDALaunchBoundsAttr>()) {
6519 // Create !{<func-ref>, metadata !"maxntidx", i32 <val>} node
6520 llvm::APSInt MaxThreads(32);
6521 MaxThreads = Attr->getMaxThreads()->EvaluateKnownConstInt(M.getContext());
6522 if (MaxThreads > 0)
6523 addNVVMMetadata(F, "maxntidx", MaxThreads.getExtValue());
6524
6525 // min blocks is an optional argument for CUDALaunchBoundsAttr. If it was
6526 // not specified in __launch_bounds__ or if the user specified a 0 value,
6527 // we don't have to add a PTX directive.
6528 if (Attr->getMinBlocks()) {
6529 llvm::APSInt MinBlocks(32);
6530 MinBlocks = Attr->getMinBlocks()->EvaluateKnownConstInt(M.getContext());
6531 if (MinBlocks > 0)
6532 // Create !{<func-ref>, metadata !"minctasm", i32 <val>} node
6533 addNVVMMetadata(F, "minctasm", MinBlocks.getExtValue());
6534 }
6535 }
6536 }
6537 }
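
// Illustrative example (hypothetical CUDA source):
//   __global__ void __launch_bounds__(256, 2) k() {}
// yields nvvm.annotations entries of "kernel" = 1, "maxntidx" = 256 and
// "minctasm" = 2 for the emitted function.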
6538
6539 void NVPTXTargetCodeGenInfo::addNVVMMetadata(llvm::Function *F, StringRef Name,
6540 int Operand) {
6541 llvm::Module *M = F->getParent();
6542 llvm::LLVMContext &Ctx = M->getContext();
6543
6544 // Get "nvvm.annotations" metadata node
6545 llvm::NamedMDNode *MD = M->getOrInsertNamedMetadata("nvvm.annotations");
6546
6547 llvm::Metadata *MDVals[] = {
6548 llvm::ConstantAsMetadata::get(F), llvm::MDString::get(Ctx, Name),
6549 llvm::ConstantAsMetadata::get(
6550 llvm::ConstantInt::get(llvm::Type::getInt32Ty(Ctx), Operand))};
6551 // Append metadata to nvvm.annotations
6552 MD->addOperand(llvm::MDNode::get(Ctx, MDVals));
6553 }
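
// The resulting module-level IR looks roughly like this (illustrative only,
// for a hypothetical kernel @k):
//   !nvvm.annotations = !{!0}
//   !0 = !{void ()* @k, !"kernel", i32 1}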
6554
6555 bool NVPTXTargetCodeGenInfo::shouldEmitStaticExternCAliases() const {
6556 return false;
6557 }
6558 }
6559
6560 //===----------------------------------------------------------------------===//
6561 // SystemZ ABI Implementation
6562 //===----------------------------------------------------------------------===//
6563
6564 namespace {
6565
6566 class SystemZABIInfo : public SwiftABIInfo {
6567 bool HasVector;
6568
6569 public:
6570   SystemZABIInfo(CodeGenTypes &CGT, bool HV)
6571 : SwiftABIInfo(CGT), HasVector(HV) {}
6572
6573 bool isPromotableIntegerType(QualType Ty) const;
6574 bool isCompoundType(QualType Ty) const;
6575 bool isVectorArgumentType(QualType Ty) const;
6576 bool isFPArgumentType(QualType Ty) const;
6577 QualType GetSingleElementType(QualType Ty) const;
6578
6579 ABIArgInfo classifyReturnType(QualType RetTy) const;
6580 ABIArgInfo classifyArgumentType(QualType ArgTy) const;
6581
6582   void computeInfo(CGFunctionInfo &FI) const override {
6583 if (!getCXXABI().classifyReturnType(FI))
6584 FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
6585 for (auto &I : FI.arguments())
6586 I.info = classifyArgumentType(I.type);
6587 }
6588
6589 Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
6590 QualType Ty) const override;
6591
6592   bool shouldPassIndirectlyForSwift(ArrayRef<llvm::Type*> scalars,
6593 bool asReturnValue) const override {
6594 return occupiesMoreThan(CGT, scalars, /*total*/ 4);
6595 }
6596   bool isSwiftErrorInRegister() const override {
6597 return false;
6598 }
6599 };
6600
6601 class SystemZTargetCodeGenInfo : public TargetCodeGenInfo {
6602 public:
6603   SystemZTargetCodeGenInfo(CodeGenTypes &CGT, bool HasVector)
6604 : TargetCodeGenInfo(new SystemZABIInfo(CGT, HasVector)) {}
6605 };
6606
6607 }
6608
6609 bool SystemZABIInfo::isPromotableIntegerType(QualType Ty) const {
6610 // Treat an enum type as its underlying type.
6611 if (const EnumType *EnumTy = Ty->getAs<EnumType>())
6612 Ty = EnumTy->getDecl()->getIntegerType();
6613
6614 // Promotable integer types are required to be promoted by the ABI.
6615 if (Ty->isPromotableIntegerType())
6616 return true;
6617
6618 // 32-bit values must also be promoted.
6619 if (const BuiltinType *BT = Ty->getAs<BuiltinType>())
6620 switch (BT->getKind()) {
6621 case BuiltinType::Int:
6622 case BuiltinType::UInt:
6623 return true;
6624 default:
6625 return false;
6626 }
6627 return false;
6628 }
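
// Illustrative examples: 'char' and 'short' are promotable in the usual C
// sense; 'int' and 'unsigned int' are additionally promoted here so that
// every integer argument occupies a full 64-bit register; 'long' is already
// register-sized and is not extended.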
6629
6630 bool SystemZABIInfo::isCompoundType(QualType Ty) const {
6631 return (Ty->isAnyComplexType() ||
6632 Ty->isVectorType() ||
6633 isAggregateTypeForABI(Ty));
6634 }
6635
6636 bool SystemZABIInfo::isVectorArgumentType(QualType Ty) const {
6637 return (HasVector &&
6638 Ty->isVectorType() &&
6639 getContext().getTypeSize(Ty) <= 128);
6640 }
6641
6642 bool SystemZABIInfo::isFPArgumentType(QualType Ty) const {
6643 if (const BuiltinType *BT = Ty->getAs<BuiltinType>())
6644 switch (BT->getKind()) {
6645 case BuiltinType::Float:
6646 case BuiltinType::Double:
6647 return true;
6648 default:
6649 return false;
6650 }
6651
6652 return false;
6653 }
6654
6655 QualType SystemZABIInfo::GetSingleElementType(QualType Ty) const {
6656 if (const RecordType *RT = Ty->getAsStructureType()) {
6657 const RecordDecl *RD = RT->getDecl();
6658 QualType Found;
6659
6660 // If this is a C++ record, check the bases first.
6661 if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD))
6662 for (const auto &I : CXXRD->bases()) {
6663 QualType Base = I.getType();
6664
6665 // Empty bases don't affect things either way.
6666 if (isEmptyRecord(getContext(), Base, true))
6667 continue;
6668
6669 if (!Found.isNull())
6670 return Ty;
6671 Found = GetSingleElementType(Base);
6672 }
6673
6674 // Check the fields.
6675 for (const auto *FD : RD->fields()) {
6676 // For compatibility with GCC, ignore empty bitfields in C++ mode.
6677 // Unlike isSingleElementStruct(), empty structure and array fields
6678 // do count. So do anonymous bitfields that aren't zero-sized.
6679 if (getContext().getLangOpts().CPlusPlus &&
6680 FD->isZeroLengthBitField(getContext()))
6681 continue;
6682
6683 // Unlike isSingleElementStruct(), arrays do not count.
6684 // Nested structures still do though.
6685 if (!Found.isNull())
6686 return Ty;
6687 Found = GetSingleElementType(FD->getType());
6688 }
6689
6690 // Unlike isSingleElementStruct(), trailing padding is allowed.
6691 // An 8-byte aligned struct s { float f; } is passed as a double.
6692 if (!Found.isNull())
6693 return Found;
6694 }
6695
6696 return Ty;
6697 }
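
// Illustrative examples (hypothetical types):
//   struct S1 { struct { float f; } inner; }; // single element type: float
//   struct S2 { float f[1]; };  // array fields don't count as float-like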
6698
6699 Address SystemZABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
6700 QualType Ty) const {
6701 // Assume that va_list type is correct; should be pointer to LLVM type:
6702 // struct {
6703 // i64 __gpr;
6704 // i64 __fpr;
6705 // i8 *__overflow_arg_area;
6706 // i8 *__reg_save_area;
6707 // };
6708
6709 // Every non-vector argument occupies 8 bytes and is passed by preference
6710 // in either GPRs or FPRs. Vector arguments occupy 8 or 16 bytes and are
6711 // always passed on the stack.
6712 Ty = getContext().getCanonicalType(Ty);
6713 auto TyInfo = getContext().getTypeInfoInChars(Ty);
6714 llvm::Type *ArgTy = CGF.ConvertTypeForMem(Ty);
6715 llvm::Type *DirectTy = ArgTy;
6716 ABIArgInfo AI = classifyArgumentType(Ty);
6717 bool IsIndirect = AI.isIndirect();
6718 bool InFPRs = false;
6719 bool IsVector = false;
6720 CharUnits UnpaddedSize;
6721 CharUnits DirectAlign;
6722 if (IsIndirect) {
6723 DirectTy = llvm::PointerType::getUnqual(DirectTy);
6724 UnpaddedSize = DirectAlign = CharUnits::fromQuantity(8);
6725 } else {
6726 if (AI.getCoerceToType())
6727 ArgTy = AI.getCoerceToType();
6728 InFPRs = ArgTy->isFloatTy() || ArgTy->isDoubleTy();
6729 IsVector = ArgTy->isVectorTy();
6730 UnpaddedSize = TyInfo.first;
6731 DirectAlign = TyInfo.second;
6732 }
6733 CharUnits PaddedSize = CharUnits::fromQuantity(8);
6734 if (IsVector && UnpaddedSize > PaddedSize)
6735 PaddedSize = CharUnits::fromQuantity(16);
6736 assert((UnpaddedSize <= PaddedSize) && "Invalid argument size.");
6737
6738 CharUnits Padding = (PaddedSize - UnpaddedSize);
6739
6740 llvm::Type *IndexTy = CGF.Int64Ty;
6741 llvm::Value *PaddedSizeV =
6742 llvm::ConstantInt::get(IndexTy, PaddedSize.getQuantity());
6743
6744 if (IsVector) {
6745 // Work out the address of a vector argument on the stack.
6746 // Vector arguments are always passed in the high bits of a
6747 // single (8 byte) or double (16 byte) stack slot.
6748 Address OverflowArgAreaPtr =
6749 CGF.Builder.CreateStructGEP(VAListAddr, 2, "overflow_arg_area_ptr");
6750 Address OverflowArgArea =
6751 Address(CGF.Builder.CreateLoad(OverflowArgAreaPtr, "overflow_arg_area"),
6752 TyInfo.second);
6753 Address MemAddr =
6754 CGF.Builder.CreateElementBitCast(OverflowArgArea, DirectTy, "mem_addr");
6755
6756     // Update the overflow_arg_area pointer.
6757 llvm::Value *NewOverflowArgArea =
6758 CGF.Builder.CreateGEP(OverflowArgArea.getPointer(), PaddedSizeV,
6759 "overflow_arg_area");
6760 CGF.Builder.CreateStore(NewOverflowArgArea, OverflowArgAreaPtr);
6761
6762 return MemAddr;
6763 }
6764
6765 assert(PaddedSize.getQuantity() == 8);
6766
6767 unsigned MaxRegs, RegCountField, RegSaveIndex;
6768 CharUnits RegPadding;
6769 if (InFPRs) {
6770 MaxRegs = 4; // Maximum of 4 FPR arguments
6771 RegCountField = 1; // __fpr
6772 RegSaveIndex = 16; // save offset for f0
6773 RegPadding = CharUnits(); // floats are passed in the high bits of an FPR
6774 } else {
6775 MaxRegs = 5; // Maximum of 5 GPR arguments
6776 RegCountField = 0; // __gpr
6777 RegSaveIndex = 2; // save offset for r2
6778 RegPadding = Padding; // values are passed in the low bits of a GPR
6779 }
6780
6781 Address RegCountPtr =
6782 CGF.Builder.CreateStructGEP(VAListAddr, RegCountField, "reg_count_ptr");
6783 llvm::Value *RegCount = CGF.Builder.CreateLoad(RegCountPtr, "reg_count");
6784 llvm::Value *MaxRegsV = llvm::ConstantInt::get(IndexTy, MaxRegs);
6785 llvm::Value *InRegs = CGF.Builder.CreateICmpULT(RegCount, MaxRegsV,
6786 "fits_in_regs");
6787
6788 llvm::BasicBlock *InRegBlock = CGF.createBasicBlock("vaarg.in_reg");
6789 llvm::BasicBlock *InMemBlock = CGF.createBasicBlock("vaarg.in_mem");
6790 llvm::BasicBlock *ContBlock = CGF.createBasicBlock("vaarg.end");
6791 CGF.Builder.CreateCondBr(InRegs, InRegBlock, InMemBlock);
6792
6793 // Emit code to load the value if it was passed in registers.
6794 CGF.EmitBlock(InRegBlock);
6795
6796 // Work out the address of an argument register.
6797 llvm::Value *ScaledRegCount =
6798 CGF.Builder.CreateMul(RegCount, PaddedSizeV, "scaled_reg_count");
6799 llvm::Value *RegBase =
6800 llvm::ConstantInt::get(IndexTy, RegSaveIndex * PaddedSize.getQuantity()
6801 + RegPadding.getQuantity());
6802 llvm::Value *RegOffset =
6803 CGF.Builder.CreateAdd(ScaledRegCount, RegBase, "reg_offset");
6804 Address RegSaveAreaPtr =
6805 CGF.Builder.CreateStructGEP(VAListAddr, 3, "reg_save_area_ptr");
6806 llvm::Value *RegSaveArea =
6807 CGF.Builder.CreateLoad(RegSaveAreaPtr, "reg_save_area");
6808 Address RawRegAddr(CGF.Builder.CreateGEP(RegSaveArea, RegOffset,
6809 "raw_reg_addr"),
6810 PaddedSize);
6811 Address RegAddr =
6812 CGF.Builder.CreateElementBitCast(RawRegAddr, DirectTy, "reg_addr");
6813
6814 // Update the register count
6815 llvm::Value *One = llvm::ConstantInt::get(IndexTy, 1);
6816 llvm::Value *NewRegCount =
6817 CGF.Builder.CreateAdd(RegCount, One, "reg_count");
6818 CGF.Builder.CreateStore(NewRegCount, RegCountPtr);
6819 CGF.EmitBranch(ContBlock);
6820
6821 // Emit code to load the value if it was passed in memory.
6822 CGF.EmitBlock(InMemBlock);
6823
6824 // Work out the address of a stack argument.
6825 Address OverflowArgAreaPtr =
6826 CGF.Builder.CreateStructGEP(VAListAddr, 2, "overflow_arg_area_ptr");
6827 Address OverflowArgArea =
6828 Address(CGF.Builder.CreateLoad(OverflowArgAreaPtr, "overflow_arg_area"),
6829 PaddedSize);
6830 Address RawMemAddr =
6831 CGF.Builder.CreateConstByteGEP(OverflowArgArea, Padding, "raw_mem_addr");
6832 Address MemAddr =
6833 CGF.Builder.CreateElementBitCast(RawMemAddr, DirectTy, "mem_addr");
6834
6835   // Update the overflow_arg_area pointer.
6836 llvm::Value *NewOverflowArgArea =
6837 CGF.Builder.CreateGEP(OverflowArgArea.getPointer(), PaddedSizeV,
6838 "overflow_arg_area");
6839 CGF.Builder.CreateStore(NewOverflowArgArea, OverflowArgAreaPtr);
6840 CGF.EmitBranch(ContBlock);
6841
6842 // Return the appropriate result.
6843 CGF.EmitBlock(ContBlock);
6844 Address ResAddr = emitMergePHI(CGF, RegAddr, InRegBlock,
6845 MemAddr, InMemBlock, "va_arg.addr");
6846
6847 if (IsIndirect)
6848 ResAddr = Address(CGF.Builder.CreateLoad(ResAddr, "indirect_arg"),
6849 TyInfo.second);
6850
6851 return ResAddr;
6852 }
6853
6854 ABIArgInfo SystemZABIInfo::classifyReturnType(QualType RetTy) const {
6855 if (RetTy->isVoidType())
6856 return ABIArgInfo::getIgnore();
6857 if (isVectorArgumentType(RetTy))
6858 return ABIArgInfo::getDirect();
6859 if (isCompoundType(RetTy) || getContext().getTypeSize(RetTy) > 64)
6860 return getNaturalAlignIndirect(RetTy);
6861 return (isPromotableIntegerType(RetTy) ? ABIArgInfo::getExtend(RetTy)
6862 : ABIArgInfo::getDirect());
6863 }
6864
6865 ABIArgInfo SystemZABIInfo::classifyArgumentType(QualType Ty) const {
6866 // Handle the generic C++ ABI.
6867 if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, getCXXABI()))
6868 return getNaturalAlignIndirect(Ty, RAA == CGCXXABI::RAA_DirectInMemory);
6869
6870 // Integers and enums are extended to full register width.
6871 if (isPromotableIntegerType(Ty))
6872 return ABIArgInfo::getExtend(Ty);
6873
6874 // Handle vector types and vector-like structure types. Note that
6875 // as opposed to float-like structure types, we do not allow any
6876 // padding for vector-like structures, so verify the sizes match.
6877 uint64_t Size = getContext().getTypeSize(Ty);
6878 QualType SingleElementTy = GetSingleElementType(Ty);
6879 if (isVectorArgumentType(SingleElementTy) &&
6880 getContext().getTypeSize(SingleElementTy) == Size)
6881 return ABIArgInfo::getDirect(CGT.ConvertType(SingleElementTy));
6882
6883 // Values that are not 1, 2, 4 or 8 bytes in size are passed indirectly.
6884 if (Size != 8 && Size != 16 && Size != 32 && Size != 64)
6885 return getNaturalAlignIndirect(Ty, /*ByVal=*/false);
6886
6887 // Handle small structures.
6888 if (const RecordType *RT = Ty->getAs<RecordType>()) {
6889     // Structures with flexible arrays have variable length, so they really
6890     // fail the size test above.
6891 const RecordDecl *RD = RT->getDecl();
6892 if (RD->hasFlexibleArrayMember())
6893 return getNaturalAlignIndirect(Ty, /*ByVal=*/false);
6894
6895 // The structure is passed as an unextended integer, a float, or a double.
6896 llvm::Type *PassTy;
6897 if (isFPArgumentType(SingleElementTy)) {
6898 assert(Size == 32 || Size == 64);
6899 if (Size == 32)
6900 PassTy = llvm::Type::getFloatTy(getVMContext());
6901 else
6902 PassTy = llvm::Type::getDoubleTy(getVMContext());
6903 } else
6904 PassTy = llvm::IntegerType::get(getVMContext(), Size);
6905 return ABIArgInfo::getDirect(PassTy);
6906 }
6907
6908 // Non-structure compounds are passed indirectly.
6909 if (isCompoundType(Ty))
6910 return getNaturalAlignIndirect(Ty, /*ByVal=*/false);
6911
6912 return ABIArgInfo::getDirect(nullptr);
6913 }
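
// Illustrative examples of the rules above (hypothetical types):
//   struct F  { float f; };       // 32 bits, float-like        -> passed as float
//   struct C3 { char c[3]; };     // 24 bits, not 1/2/4/8 bytes -> indirect
//   struct I  { char a; int b; }; // 64 bits, not float-like    -> passed as i64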
6914
6915 //===----------------------------------------------------------------------===//
6916 // MSP430 ABI Implementation
6917 //===----------------------------------------------------------------------===//
6918
6919 namespace {
6920
6921 class MSP430TargetCodeGenInfo : public TargetCodeGenInfo {
6922 public:
6923   MSP430TargetCodeGenInfo(CodeGenTypes &CGT)
6924 : TargetCodeGenInfo(new DefaultABIInfo(CGT)) {}
6925 void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
6926 CodeGen::CodeGenModule &M) const override;
6927 };
6928
6929 }
6930
6931 void MSP430TargetCodeGenInfo::setTargetAttributes(
6932 const Decl *D, llvm::GlobalValue *GV, CodeGen::CodeGenModule &M) const {
6933 if (GV->isDeclaration())
6934 return;
6935 if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D)) {
6936 const auto *InterruptAttr = FD->getAttr<MSP430InterruptAttr>();
6937 if (!InterruptAttr)
6938 return;
6939
6940 // Handle 'interrupt' attribute:
6941 llvm::Function *F = cast<llvm::Function>(GV);
6942
6943 // Step 1: Set ISR calling convention.
6944 F->setCallingConv(llvm::CallingConv::MSP430_INTR);
6945
6946     // Step 2: Add the required function attributes.
6947 F->addFnAttr(llvm::Attribute::NoInline);
6948 F->addFnAttr("interrupt", llvm::utostr(InterruptAttr->getNumber()));
6949 }
6950 }
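
// Illustrative example (hypothetical source):
//   __attribute__((interrupt(4))) void isr(void) {}
// gives 'isr' the MSP430_INTR calling convention, noinline, and the string
// attribute "interrupt"="4".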
6951
6952 //===----------------------------------------------------------------------===//
6953 // MIPS ABI Implementation. This works for both little-endian and
6954 // big-endian variants.
6955 //===----------------------------------------------------------------------===//
6956
6957 namespace {
6958 class MipsABIInfo : public ABIInfo {
6959 bool IsO32;
6960 unsigned MinABIStackAlignInBytes, StackAlignInBytes;
6961 void CoerceToIntArgs(uint64_t TySize,
6962 SmallVectorImpl<llvm::Type *> &ArgList) const;
6963 llvm::Type* HandleAggregates(QualType Ty, uint64_t TySize) const;
6964 llvm::Type* returnAggregateInRegs(QualType RetTy, uint64_t Size) const;
6965 llvm::Type* getPaddingType(uint64_t Align, uint64_t Offset) const;
6966 public:
6967   MipsABIInfo(CodeGenTypes &CGT, bool _IsO32) :
6968 ABIInfo(CGT), IsO32(_IsO32), MinABIStackAlignInBytes(IsO32 ? 4 : 8),
6969 StackAlignInBytes(IsO32 ? 8 : 16) {}
6970
6971 ABIArgInfo classifyReturnType(QualType RetTy) const;
6972 ABIArgInfo classifyArgumentType(QualType RetTy, uint64_t &Offset) const;
6973 void computeInfo(CGFunctionInfo &FI) const override;
6974 Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
6975 QualType Ty) const override;
6976 ABIArgInfo extendType(QualType Ty) const;
6977 };
6978
6979 class MIPSTargetCodeGenInfo : public TargetCodeGenInfo {
6980 unsigned SizeOfUnwindException;
6981 public:
6982   MIPSTargetCodeGenInfo(CodeGenTypes &CGT, bool IsO32)
6983 : TargetCodeGenInfo(new MipsABIInfo(CGT, IsO32)),
6984 SizeOfUnwindException(IsO32 ? 24 : 32) {}
6985
6986   int getDwarfEHStackPointer(CodeGen::CodeGenModule &CGM) const override {
6987 return 29;
6988 }
6989
6990   void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
6991 CodeGen::CodeGenModule &CGM) const override {
6992 const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D);
6993 if (!FD) return;
6994 llvm::Function *Fn = cast<llvm::Function>(GV);
6995
6996 if (FD->hasAttr<MipsLongCallAttr>())
6997 Fn->addFnAttr("long-call");
6998 else if (FD->hasAttr<MipsShortCallAttr>())
6999 Fn->addFnAttr("short-call");
7000
7001 // Other attributes do not have a meaning for declarations.
7002 if (GV->isDeclaration())
7003 return;
7004
7005 if (FD->hasAttr<Mips16Attr>()) {
7006 Fn->addFnAttr("mips16");
7007 }
7008 else if (FD->hasAttr<NoMips16Attr>()) {
7009 Fn->addFnAttr("nomips16");
7010 }
7011
7012 if (FD->hasAttr<MicroMipsAttr>())
7013 Fn->addFnAttr("micromips");
7014 else if (FD->hasAttr<NoMicroMipsAttr>())
7015 Fn->addFnAttr("nomicromips");
7016
7017 const MipsInterruptAttr *Attr = FD->getAttr<MipsInterruptAttr>();
7018 if (!Attr)
7019 return;
7020
7021 const char *Kind;
7022 switch (Attr->getInterrupt()) {
7023 case MipsInterruptAttr::eic: Kind = "eic"; break;
7024 case MipsInterruptAttr::sw0: Kind = "sw0"; break;
7025 case MipsInterruptAttr::sw1: Kind = "sw1"; break;
7026 case MipsInterruptAttr::hw0: Kind = "hw0"; break;
7027 case MipsInterruptAttr::hw1: Kind = "hw1"; break;
7028 case MipsInterruptAttr::hw2: Kind = "hw2"; break;
7029 case MipsInterruptAttr::hw3: Kind = "hw3"; break;
7030 case MipsInterruptAttr::hw4: Kind = "hw4"; break;
7031 case MipsInterruptAttr::hw5: Kind = "hw5"; break;
7032 }
7033
7034 Fn->addFnAttr("interrupt", Kind);
7036   }
7037
7038 bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
7039 llvm::Value *Address) const override;
7040
7041   unsigned getSizeOfUnwindException() const override {
7042 return SizeOfUnwindException;
7043 }
7044 };
7045 }
7046
7047 void MipsABIInfo::CoerceToIntArgs(
7048 uint64_t TySize, SmallVectorImpl<llvm::Type *> &ArgList) const {
7049 llvm::IntegerType *IntTy =
7050 llvm::IntegerType::get(getVMContext(), MinABIStackAlignInBytes * 8);
7051
7052   // Add (TySize / (MinABIStackAlignInBytes * 8)) args of IntTy; TySize is in bits.
7053 for (unsigned N = TySize / (MinABIStackAlignInBytes * 8); N; --N)
7054 ArgList.push_back(IntTy);
7055
7056 // If necessary, add one more integer type to ArgList.
7057 unsigned R = TySize % (MinABIStackAlignInBytes * 8);
7058
7059 if (R)
7060 ArgList.push_back(llvm::IntegerType::get(getVMContext(), R));
7061 }
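
// Illustrative example: under O32 (MinABIStackAlignInBytes == 4) a 72-bit
// type produces { i32, i32, i8 }: two full 32-bit slots plus an 8-bit
// remainder.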
7062
7063 // In N32/64, an aligned double precision floating point field is passed in
7064 // a register.
7065 llvm::Type* MipsABIInfo::HandleAggregates(QualType Ty, uint64_t TySize) const {
7066 SmallVector<llvm::Type*, 8> ArgList, IntArgList;
7067
7068 if (IsO32) {
7069 CoerceToIntArgs(TySize, ArgList);
7070 return llvm::StructType::get(getVMContext(), ArgList);
7071 }
7072
7073 if (Ty->isComplexType())
7074 return CGT.ConvertType(Ty);
7075
7076 const RecordType *RT = Ty->getAs<RecordType>();
7077
7078 // Unions/vectors are passed in integer registers.
7079 if (!RT || !RT->isStructureOrClassType()) {
7080 CoerceToIntArgs(TySize, ArgList);
7081 return llvm::StructType::get(getVMContext(), ArgList);
7082 }
7083
7084 const RecordDecl *RD = RT->getDecl();
7085 const ASTRecordLayout &Layout = getContext().getASTRecordLayout(RD);
7086 assert(!(TySize % 8) && "Size of structure must be multiple of 8.");
7087
7088 uint64_t LastOffset = 0;
7089 unsigned idx = 0;
7090 llvm::IntegerType *I64 = llvm::IntegerType::get(getVMContext(), 64);
7091
7092 // Iterate over fields in the struct/class and check if there are any aligned
7093 // double fields.
7094 for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
7095 i != e; ++i, ++idx) {
7096 const QualType Ty = i->getType();
7097 const BuiltinType *BT = Ty->getAs<BuiltinType>();
7098
7099 if (!BT || BT->getKind() != BuiltinType::Double)
7100 continue;
7101
7102 uint64_t Offset = Layout.getFieldOffset(idx);
7103 if (Offset % 64) // Ignore doubles that are not aligned.
7104 continue;
7105
7106 // Add ((Offset - LastOffset) / 64) args of type i64.
7107 for (unsigned j = (Offset - LastOffset) / 64; j > 0; --j)
7108 ArgList.push_back(I64);
7109
7110 // Add double type.
7111 ArgList.push_back(llvm::Type::getDoubleTy(getVMContext()));
7112 LastOffset = Offset + 64;
7113 }
7114
7115 CoerceToIntArgs(TySize - LastOffset, IntArgList);
7116 ArgList.append(IntArgList.begin(), IntArgList.end());
7117
7118 return llvm::StructType::get(getVMContext(), ArgList);
7119 }
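// For illustration (not in the original source): under N64, a
// 'struct { double d; int i; }' (128 bits) is coerced to { double, i64 }:
// the aligned double is exposed as a first-level element so it can travel in
// a floating-point register, and CoerceToIntArgs covers the remaining 64 bits.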
7120
7121 llvm::Type *MipsABIInfo::getPaddingType(uint64_t OrigOffset,
7122 uint64_t Offset) const {
7123 if (OrigOffset + MinABIStackAlignInBytes > Offset)
7124 return nullptr;
7125
7126 return llvm::IntegerType::get(getVMContext(), (Offset - OrigOffset) * 8);
7127 }
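// Example (annotation, not in the original source): with 8-byte slots (N64),
// an argument realigned from byte offset 8 up to byte offset 16 gets i64
// ((16 - 8) * 8 bits) as its padding type; when the gap is smaller than one
// slot, no whole slot was skipped and nullptr is returned.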
7128
7129 ABIArgInfo
7130 MipsABIInfo::classifyArgumentType(QualType Ty, uint64_t &Offset) const {
7131 Ty = useFirstFieldIfTransparentUnion(Ty);
7132
7133 uint64_t OrigOffset = Offset;
7134 uint64_t TySize = getContext().getTypeSize(Ty);
7135 uint64_t Align = getContext().getTypeAlign(Ty) / 8;
7136
7137 Align = std::min(std::max(Align, (uint64_t)MinABIStackAlignInBytes),
7138 (uint64_t)StackAlignInBytes);
7139 unsigned CurrOffset = llvm::alignTo(Offset, Align);
7140 Offset = CurrOffset + llvm::alignTo(TySize, Align * 8) / 8;
7141
7142 if (isAggregateTypeForABI(Ty) || Ty->isVectorType()) {
7143 // Ignore empty aggregates.
7144 if (TySize == 0)
7145 return ABIArgInfo::getIgnore();
7146
7147 if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, getCXXABI())) {
7148 Offset = OrigOffset + MinABIStackAlignInBytes;
7149 return getNaturalAlignIndirect(Ty, RAA == CGCXXABI::RAA_DirectInMemory);
7150 }
7151
7152 // If we have reached here, aggregates are passed directly by coercing to
7153 // another structure type. Padding is inserted if the offset of the
7154 // aggregate is unaligned.
7155 ABIArgInfo ArgInfo =
7156 ABIArgInfo::getDirect(HandleAggregates(Ty, TySize), 0,
7157 getPaddingType(OrigOffset, CurrOffset));
7158 ArgInfo.setInReg(true);
7159 return ArgInfo;
7160 }
7161
7162 // Treat an enum type as its underlying type.
7163 if (const EnumType *EnumTy = Ty->getAs<EnumType>())
7164 Ty = EnumTy->getDecl()->getIntegerType();
7165
7166 // All integral types are promoted to the GPR width.
7167 if (Ty->isIntegralOrEnumerationType())
7168 return extendType(Ty);
7169
7170 return ABIArgInfo::getDirect(
7171 nullptr, 0, IsO32 ? nullptr : getPaddingType(OrigOffset, CurrOffset));
7172 }
7173
7174 llvm::Type*
7175 MipsABIInfo::returnAggregateInRegs(QualType RetTy, uint64_t Size) const {
7176 const RecordType *RT = RetTy->getAs<RecordType>();
7177 SmallVector<llvm::Type*, 8> RTList;
7178
7179 if (RT && RT->isStructureOrClassType()) {
7180 const RecordDecl *RD = RT->getDecl();
7181 const ASTRecordLayout &Layout = getContext().getASTRecordLayout(RD);
7182 unsigned FieldCnt = Layout.getFieldCount();
7183
7184 // N32/64 returns struct/classes in floating point registers if the
7185 // following conditions are met:
7186 // 1. The size of the struct/class is no larger than 128-bit.
7187 // 2. The struct/class has one or two fields all of which are floating
7188 // point types.
7189 // 3. The offset of the first field is zero (this follows what gcc does).
7190 //
7191 // Any other composite results are returned in integer registers.
7192 //
7193 if (FieldCnt && (FieldCnt <= 2) && !Layout.getFieldOffset(0)) {
7194 RecordDecl::field_iterator b = RD->field_begin(), e = RD->field_end();
7195 for (; b != e; ++b) {
7196 const BuiltinType *BT = b->getType()->getAs<BuiltinType>();
7197
7198 if (!BT || !BT->isFloatingPoint())
7199 break;
7200
7201 RTList.push_back(CGT.ConvertType(b->getType()));
7202 }
7203
7204 if (b == e)
7205 return llvm::StructType::get(getVMContext(), RTList,
7206 RD->hasAttr<PackedAttr>());
7207
7208 RTList.clear();
7209 }
7210 }
7211
7212 CoerceToIntArgs(Size, RTList);
7213 return llvm::StructType::get(getVMContext(), RTList);
7214 }
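// For illustration (not in the original source):
// 'struct { float f; double d; }' meets all three conditions above and is
// returned as the literal type { float, double } in floating-point registers,
// whereas 'struct { float f; int i; }' fails condition 2 and falls through to
// CoerceToIntArgs, yielding { i64 }.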
7215
7216 ABIArgInfo MipsABIInfo::classifyReturnType(QualType RetTy) const {
7217 uint64_t Size = getContext().getTypeSize(RetTy);
7218
7219 if (RetTy->isVoidType())
7220 return ABIArgInfo::getIgnore();
7221
7222 // O32 doesn't treat zero-sized structs differently from other structs.
7223 // However, N32/N64 ignores zero-sized return values.
7224 if (!IsO32 && Size == 0)
7225 return ABIArgInfo::getIgnore();
7226
7227 if (isAggregateTypeForABI(RetTy) || RetTy->isVectorType()) {
7228 if (Size <= 128) {
7229 if (RetTy->isAnyComplexType())
7230 return ABIArgInfo::getDirect();
7231
7232 // O32 returns integer vectors in registers and N32/N64 returns all small
7233 // aggregates in registers.
7234 if (!IsO32 ||
7235 (RetTy->isVectorType() && !RetTy->hasFloatingRepresentation())) {
7236 ABIArgInfo ArgInfo =
7237 ABIArgInfo::getDirect(returnAggregateInRegs(RetTy, Size));
7238 ArgInfo.setInReg(true);
7239 return ArgInfo;
7240 }
7241 }
7242
7243 return getNaturalAlignIndirect(RetTy);
7244 }
7245
7246 // Treat an enum type as its underlying type.
7247 if (const EnumType *EnumTy = RetTy->getAs<EnumType>())
7248 RetTy = EnumTy->getDecl()->getIntegerType();
7249
7250 if (RetTy->isPromotableIntegerType())
7251 return ABIArgInfo::getExtend(RetTy);
7252
7253 if ((RetTy->isUnsignedIntegerOrEnumerationType() ||
7254 RetTy->isSignedIntegerOrEnumerationType()) && Size == 32 && !IsO32)
7255 return ABIArgInfo::getSignExtend(RetTy);
7256
7257 return ABIArgInfo::getDirect();
7258 }
7259
7260 void MipsABIInfo::computeInfo(CGFunctionInfo &FI) const {
7261 ABIArgInfo &RetInfo = FI.getReturnInfo();
7262 if (!getCXXABI().classifyReturnType(FI))
7263 RetInfo = classifyReturnType(FI.getReturnType());
7264
7265 // Check if a pointer to an aggregate is passed as a hidden argument.
7266 uint64_t Offset = RetInfo.isIndirect() ? MinABIStackAlignInBytes : 0;
7267
7268 for (auto &I : FI.arguments())
7269 I.info = classifyArgumentType(I.type, Offset);
7270 }
7271
7272 Address MipsABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
7273 QualType OrigTy) const {
7274 QualType Ty = OrigTy;
7275
7276 // Integer arguments are promoted to 32-bit on O32 and 64-bit on N32/N64.
7277 // Pointers are also promoted in the same way but this only matters for N32.
7278 unsigned SlotSizeInBits = IsO32 ? 32 : 64;
7279 unsigned PtrWidth = getTarget().getPointerWidth(0);
7280 bool DidPromote = false;
7281 if ((Ty->isIntegerType() &&
7282 getContext().getIntWidth(Ty) < SlotSizeInBits) ||
7283 (Ty->isPointerType() && PtrWidth < SlotSizeInBits)) {
7284 DidPromote = true;
7285 Ty = getContext().getIntTypeForBitwidth(SlotSizeInBits,
7286 Ty->isSignedIntegerType());
7287 }
7288
7289 auto TyInfo = getContext().getTypeInfoInChars(Ty);
7290
7291 // The alignment of things in the argument area is never larger than
7292 // StackAlignInBytes.
7293 TyInfo.second =
7294 std::min(TyInfo.second, CharUnits::fromQuantity(StackAlignInBytes));
7295
7296 // MinABIStackAlignInBytes is the size of argument slots on the stack.
7297 CharUnits ArgSlotSize = CharUnits::fromQuantity(MinABIStackAlignInBytes);
7298
7299 Address Addr = emitVoidPtrVAArg(CGF, VAListAddr, Ty, /*indirect*/ false,
7300 TyInfo, ArgSlotSize, /*AllowHigherAlign*/ true);
7301
7303 // If there was a promotion, "unpromote" into a temporary.
7304 // TODO: can we just use a pointer into a subset of the original slot?
7305 if (DidPromote) {
7306 Address Temp = CGF.CreateMemTemp(OrigTy, "vaarg.promotion-temp");
7307 llvm::Value *Promoted = CGF.Builder.CreateLoad(Addr);
7308
7309 // Truncate down to the right width.
7310 llvm::Type *IntTy = (OrigTy->isIntegerType() ? Temp.getElementType()
7311 : CGF.IntPtrTy);
7312 llvm::Value *V = CGF.Builder.CreateTrunc(Promoted, IntTy);
7313 if (OrigTy->isPointerType())
7314 V = CGF.Builder.CreateIntToPtr(V, Temp.getElementType());
7315
7316 CGF.Builder.CreateStore(V, Temp);
7317 Addr = Temp;
7318 }
7319
7320 return Addr;
7321 }
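// For illustration (not in the original source): on N64, va_arg(ap, int)
// reads a full 64-bit slot (the caller promoted the int), and the
// "unpromote" path above truncates to the low 32 bits, stores them into a
// temporary, and returns the temporary's address.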
7322
7323 ABIArgInfo MipsABIInfo::extendType(QualType Ty) const {
7324 int TySize = getContext().getTypeSize(Ty);
7325
7326 // MIPS64 ABI requires unsigned 32 bit integers to be sign extended.
7327 if (Ty->isUnsignedIntegerOrEnumerationType() && TySize == 32)
7328 return ABIArgInfo::getSignExtend(Ty);
7329
7330 return ABIArgInfo::getExtend(Ty);
7331 }
7332
7333 bool
7334 MIPSTargetCodeGenInfo::initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
7335 llvm::Value *Address) const {
7336   // This information comes from gcc's implementation, which seems to be
7337 // as canonical as it gets.
7338
7339 // Everything on MIPS is 4 bytes. Double-precision FP registers
7340 // are aliased to pairs of single-precision FP registers.
7341 llvm::Value *Four8 = llvm::ConstantInt::get(CGF.Int8Ty, 4);
7342
7343 // 0-31 are the general purpose registers, $0 - $31.
7344 // 32-63 are the floating-point registers, $f0 - $f31.
7345 // 64 and 65 are the multiply/divide registers, $hi and $lo.
7346 // 66 is the (notional, I think) register for signal-handler return.
7347 AssignToArrayRange(CGF.Builder, Address, Four8, 0, 65);
7348
7349 // 67-74 are the floating-point status registers, $fcc0 - $fcc7.
7350 // They are one bit wide and ignored here.
7351
7352 // 80-111 are the coprocessor 0 registers, $c0r0 - $c0r31.
7353 // (coprocessor 1 is the FP unit)
7354 // 112-143 are the coprocessor 2 registers, $c2r0 - $c2r31.
7355 // 144-175 are the coprocessor 3 registers, $c3r0 - $c3r31.
7356 // 176-181 are the DSP accumulator registers.
7357 AssignToArrayRange(CGF.Builder, Address, Four8, 80, 181);
7358 return false;
7359 }
7360
7361 //===----------------------------------------------------------------------===//
7362 // AVR ABI Implementation.
7363 //===----------------------------------------------------------------------===//
7364
7365 namespace {
7366 class AVRTargetCodeGenInfo : public TargetCodeGenInfo {
7367 public:
7368   AVRTargetCodeGenInfo(CodeGenTypes &CGT)
7369 : TargetCodeGenInfo(new DefaultABIInfo(CGT)) { }
7370
7371   void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
7372 CodeGen::CodeGenModule &CGM) const override {
7373 if (GV->isDeclaration())
7374 return;
7375 const auto *FD = dyn_cast_or_null<FunctionDecl>(D);
7376 if (!FD) return;
7377 auto *Fn = cast<llvm::Function>(GV);
7378
7379 if (FD->getAttr<AVRInterruptAttr>())
7380 Fn->addFnAttr("interrupt");
7381
7382 if (FD->getAttr<AVRSignalAttr>())
7383 Fn->addFnAttr("signal");
7384 }
7385 };
7386 }
7387
7388 //===----------------------------------------------------------------------===//
7389 // TCE ABI Implementation (see http://tce.cs.tut.fi). Uses mostly the defaults.
7390 // Currently subclassed only to implement custom OpenCL C function attribute
7391 // handling.
7392 //===----------------------------------------------------------------------===//
7393
7394 namespace {
7395
7396 class TCETargetCodeGenInfo : public DefaultTargetCodeGenInfo {
7397 public:
7398   TCETargetCodeGenInfo(CodeGenTypes &CGT)
7399 : DefaultTargetCodeGenInfo(CGT) {}
7400
7401 void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
7402 CodeGen::CodeGenModule &M) const override;
7403 };
7404
7405 void TCETargetCodeGenInfo::setTargetAttributes(
7406 const Decl *D, llvm::GlobalValue *GV, CodeGen::CodeGenModule &M) const {
7407 if (GV->isDeclaration())
7408 return;
7409 const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D);
7410 if (!FD) return;
7411
7412 llvm::Function *F = cast<llvm::Function>(GV);
7413
7414 if (M.getLangOpts().OpenCL) {
7415 if (FD->hasAttr<OpenCLKernelAttr>()) {
7416 // OpenCL C Kernel functions are not subject to inlining
7417 F->addFnAttr(llvm::Attribute::NoInline);
7418 const ReqdWorkGroupSizeAttr *Attr = FD->getAttr<ReqdWorkGroupSizeAttr>();
7419 if (Attr) {
7420 // Convert the reqd_work_group_size() attributes to metadata.
7421 llvm::LLVMContext &Context = F->getContext();
7422 llvm::NamedMDNode *OpenCLMetadata =
7423 M.getModule().getOrInsertNamedMetadata(
7424 "opencl.kernel_wg_size_info");
7425
7426 SmallVector<llvm::Metadata *, 5> Operands;
7427 Operands.push_back(llvm::ConstantAsMetadata::get(F));
7428
7429 Operands.push_back(
7430 llvm::ConstantAsMetadata::get(llvm::Constant::getIntegerValue(
7431 M.Int32Ty, llvm::APInt(32, Attr->getXDim()))));
7432 Operands.push_back(
7433 llvm::ConstantAsMetadata::get(llvm::Constant::getIntegerValue(
7434 M.Int32Ty, llvm::APInt(32, Attr->getYDim()))));
7435 Operands.push_back(
7436 llvm::ConstantAsMetadata::get(llvm::Constant::getIntegerValue(
7437 M.Int32Ty, llvm::APInt(32, Attr->getZDim()))));
7438
7439 // Add a boolean constant operand for "required" (true) or "hint"
7440 // (false) for implementing the work_group_size_hint attr later.
7441 // Currently always true as the hint is not yet implemented.
7442 Operands.push_back(
7443 llvm::ConstantAsMetadata::get(llvm::ConstantInt::getTrue(Context)));
7444 OpenCLMetadata->addOperand(llvm::MDNode::get(Context, Operands));
7445 }
7446 }
7447 }
7448 }
7449
7450 }
7451
7452 //===----------------------------------------------------------------------===//
7453 // Hexagon ABI Implementation
7454 //===----------------------------------------------------------------------===//
7455
7456 namespace {
7457
7458 class HexagonABIInfo : public ABIInfo {
7459
7461 public:
7462   HexagonABIInfo(CodeGenTypes &CGT) : ABIInfo(CGT) {}
7463
7464 private:
7465
7466 ABIArgInfo classifyReturnType(QualType RetTy) const;
7467 ABIArgInfo classifyArgumentType(QualType RetTy) const;
7468
7469 void computeInfo(CGFunctionInfo &FI) const override;
7470
7471 Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
7472 QualType Ty) const override;
7473 };
7474
7475 class HexagonTargetCodeGenInfo : public TargetCodeGenInfo {
7476 public:
7477   HexagonTargetCodeGenInfo(CodeGenTypes &CGT)
7478 :TargetCodeGenInfo(new HexagonABIInfo(CGT)) {}
7479
7480   int getDwarfEHStackPointer(CodeGen::CodeGenModule &M) const override {
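    // On Hexagon, r29 is the stack pointer.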
7481 return 29;
7482 }
7483 };
7484
7485 }
7486
7487 void HexagonABIInfo::computeInfo(CGFunctionInfo &FI) const {
7488 if (!getCXXABI().classifyReturnType(FI))
7489 FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
7490 for (auto &I : FI.arguments())
7491 I.info = classifyArgumentType(I.type);
7492 }
7493
7494 ABIArgInfo HexagonABIInfo::classifyArgumentType(QualType Ty) const {
7495 if (!isAggregateTypeForABI(Ty)) {
7496 // Treat an enum type as its underlying type.
7497 if (const EnumType *EnumTy = Ty->getAs<EnumType>())
7498 Ty = EnumTy->getDecl()->getIntegerType();
7499
7500 return (Ty->isPromotableIntegerType() ? ABIArgInfo::getExtend(Ty)
7501 : ABIArgInfo::getDirect());
7502 }
7503
7504 if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, getCXXABI()))
7505 return getNaturalAlignIndirect(Ty, RAA == CGCXXABI::RAA_DirectInMemory);
7506
7507 // Ignore empty records.
7508 if (isEmptyRecord(getContext(), Ty, true))
7509 return ABIArgInfo::getIgnore();
7510
7511 uint64_t Size = getContext().getTypeSize(Ty);
7512 if (Size > 64)
7513 return getNaturalAlignIndirect(Ty, /*ByVal=*/true);
7514 // Pass in the smallest viable integer type.
7515 else if (Size > 32)
7516 return ABIArgInfo::getDirect(llvm::Type::getInt64Ty(getVMContext()));
7517 else if (Size > 16)
7518 return ABIArgInfo::getDirect(llvm::Type::getInt32Ty(getVMContext()));
7519 else if (Size > 8)
7520 return ABIArgInfo::getDirect(llvm::Type::getInt16Ty(getVMContext()));
7521 else
7522 return ABIArgInfo::getDirect(llvm::Type::getInt8Ty(getVMContext()));
7523 }
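// For illustration (not in the original source): a 6-byte struct (48 bits)
// is passed directly as i64 by the ladder above, while a 12-byte struct
// exceeds 64 bits and is passed indirectly, by value.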
7524
7525 ABIArgInfo HexagonABIInfo::classifyReturnType(QualType RetTy) const {
7526 if (RetTy->isVoidType())
7527 return ABIArgInfo::getIgnore();
7528
7529 // Large vector types should be returned via memory.
7530 if (RetTy->isVectorType() && getContext().getTypeSize(RetTy) > 64)
7531 return getNaturalAlignIndirect(RetTy);
7532
7533 if (!isAggregateTypeForABI(RetTy)) {
7534 // Treat an enum type as its underlying type.
7535 if (const EnumType *EnumTy = RetTy->getAs<EnumType>())
7536 RetTy = EnumTy->getDecl()->getIntegerType();
7537
7538 return (RetTy->isPromotableIntegerType() ? ABIArgInfo::getExtend(RetTy)
7539 : ABIArgInfo::getDirect());
7540 }
7541
7542 if (isEmptyRecord(getContext(), RetTy, true))
7543 return ABIArgInfo::getIgnore();
7544
7545 // Aggregates <= 8 bytes are returned in r0; other aggregates
7546 // are returned indirectly.
7547 uint64_t Size = getContext().getTypeSize(RetTy);
7548 if (Size <= 64) {
7549 // Return in the smallest viable integer type.
7550 if (Size <= 8)
7551 return ABIArgInfo::getDirect(llvm::Type::getInt8Ty(getVMContext()));
7552 if (Size <= 16)
7553 return ABIArgInfo::getDirect(llvm::Type::getInt16Ty(getVMContext()));
7554 if (Size <= 32)
7555 return ABIArgInfo::getDirect(llvm::Type::getInt32Ty(getVMContext()));
7556 return ABIArgInfo::getDirect(llvm::Type::getInt64Ty(getVMContext()));
7557 }
7558
7559 return getNaturalAlignIndirect(RetTy, /*ByVal=*/true);
7560 }
7561
7562 Address HexagonABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
7563 QualType Ty) const {
7564   // FIXME: Someone needs to audit that this handles alignment correctly.
7565 return emitVoidPtrVAArg(CGF, VAListAddr, Ty, /*indirect*/ false,
7566 getContext().getTypeInfoInChars(Ty),
7567 CharUnits::fromQuantity(4),
7568 /*AllowHigherAlign*/ true);
7569 }
7570
7571 //===----------------------------------------------------------------------===//
7572 // Lanai ABI Implementation
7573 //===----------------------------------------------------------------------===//
7574
7575 namespace {
7576 class LanaiABIInfo : public DefaultABIInfo {
7577 public:
7578   LanaiABIInfo(CodeGen::CodeGenTypes &CGT) : DefaultABIInfo(CGT) {}
7579
7580 bool shouldUseInReg(QualType Ty, CCState &State) const;
7581
7582   void computeInfo(CGFunctionInfo &FI) const override {
7583 CCState State(FI);
7584 // Lanai uses 4 registers to pass arguments unless the function has the
7585 // regparm attribute set.
7586 if (FI.getHasRegParm()) {
7587 State.FreeRegs = FI.getRegParm();
7588 } else {
7589 State.FreeRegs = 4;
7590 }
7591
7592 if (!getCXXABI().classifyReturnType(FI))
7593 FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
7594 for (auto &I : FI.arguments())
7595 I.info = classifyArgumentType(I.type, State);
7596 }
7597
7598 ABIArgInfo getIndirectResult(QualType Ty, bool ByVal, CCState &State) const;
7599 ABIArgInfo classifyArgumentType(QualType RetTy, CCState &State) const;
7600 };
7601 } // end anonymous namespace
7602
7603 bool LanaiABIInfo::shouldUseInReg(QualType Ty, CCState &State) const {
7604 unsigned Size = getContext().getTypeSize(Ty);
7605 unsigned SizeInRegs = llvm::alignTo(Size, 32U) / 32U;
7606
7607 if (SizeInRegs == 0)
7608 return false;
7609
7610 if (SizeInRegs > State.FreeRegs) {
7611 State.FreeRegs = 0;
7612 return false;
7613 }
7614
7615 State.FreeRegs -= SizeInRegs;
7616
7617 return true;
7618 }
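// For illustration (not in the original source): with the default
// State.FreeRegs of 4, a 64-bit scalar needs (64 -> 2 x 32-bit) registers,
// so it consumes two (4 -> 2) and returns true; a later 96-bit value needing
// three registers no longer fits, so the remaining registers are zeroed and
// the value goes to the stack.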
7619
7620 ABIArgInfo LanaiABIInfo::getIndirectResult(QualType Ty, bool ByVal,
7621 CCState &State) const {
7622 if (!ByVal) {
7623 if (State.FreeRegs) {
7624 --State.FreeRegs; // Non-byval indirects just use one pointer.
7625 return getNaturalAlignIndirectInReg(Ty);
7626 }
7627 return getNaturalAlignIndirect(Ty, false);
7628 }
7629
7630 // Compute the byval alignment.
7631 const unsigned MinABIStackAlignInBytes = 4;
7632 unsigned TypeAlign = getContext().getTypeAlign(Ty) / 8;
7633 return ABIArgInfo::getIndirect(CharUnits::fromQuantity(4), /*ByVal=*/true,
7634 /*Realign=*/TypeAlign >
7635 MinABIStackAlignInBytes);
7636 }
7637
7638 ABIArgInfo LanaiABIInfo::classifyArgumentType(QualType Ty,
7639 CCState &State) const {
7640 // Check with the C++ ABI first.
7641 const RecordType *RT = Ty->getAs<RecordType>();
7642 if (RT) {
7643 CGCXXABI::RecordArgABI RAA = getRecordArgABI(RT, getCXXABI());
7644 if (RAA == CGCXXABI::RAA_Indirect) {
7645 return getIndirectResult(Ty, /*ByVal=*/false, State);
7646 } else if (RAA == CGCXXABI::RAA_DirectInMemory) {
7647 return getNaturalAlignIndirect(Ty, /*ByRef=*/true);
7648 }
7649 }
7650
7651 if (isAggregateTypeForABI(Ty)) {
7652 // Structures with flexible arrays are always indirect.
7653 if (RT && RT->getDecl()->hasFlexibleArrayMember())
7654 return getIndirectResult(Ty, /*ByVal=*/true, State);
7655
7656 // Ignore empty structs/unions.
7657 if (isEmptyRecord(getContext(), Ty, true))
7658 return ABIArgInfo::getIgnore();
7659
7660 llvm::LLVMContext &LLVMContext = getVMContext();
7661 unsigned SizeInRegs = (getContext().getTypeSize(Ty) + 31) / 32;
7662 if (SizeInRegs <= State.FreeRegs) {
7663 llvm::IntegerType *Int32 = llvm::Type::getInt32Ty(LLVMContext);
7664 SmallVector<llvm::Type *, 3> Elements(SizeInRegs, Int32);
7665 llvm::Type *Result = llvm::StructType::get(LLVMContext, Elements);
7666 State.FreeRegs -= SizeInRegs;
7667 return ABIArgInfo::getDirectInReg(Result);
7668 } else {
7669 State.FreeRegs = 0;
7670 }
7671 return getIndirectResult(Ty, true, State);
7672 }
7673
7674 // Treat an enum type as its underlying type.
7675 if (const auto *EnumTy = Ty->getAs<EnumType>())
7676 Ty = EnumTy->getDecl()->getIntegerType();
7677
7678 bool InReg = shouldUseInReg(Ty, State);
7679 if (Ty->isPromotableIntegerType()) {
7680 if (InReg)
7681 return ABIArgInfo::getDirectInReg();
7682 return ABIArgInfo::getExtend(Ty);
7683 }
7684 if (InReg)
7685 return ABIArgInfo::getDirectInReg();
7686 return ABIArgInfo::getDirect();
7687 }
7688
7689 namespace {
7690 class LanaiTargetCodeGenInfo : public TargetCodeGenInfo {
7691 public:
7692   LanaiTargetCodeGenInfo(CodeGen::CodeGenTypes &CGT)
7693 : TargetCodeGenInfo(new LanaiABIInfo(CGT)) {}
7694 };
7695 }
7696
7697 //===----------------------------------------------------------------------===//
7698 // AMDGPU ABI Implementation
7699 //===----------------------------------------------------------------------===//
7700
7701 namespace {
7702
7703 class AMDGPUABIInfo final : public DefaultABIInfo {
7704 private:
7705 static const unsigned MaxNumRegsForArgsRet = 16;
7706
7707 unsigned numRegsForType(QualType Ty) const;
7708
7709 bool isHomogeneousAggregateBaseType(QualType Ty) const override;
7710 bool isHomogeneousAggregateSmallEnough(const Type *Base,
7711 uint64_t Members) const override;
7712
7713 // Coerce HIP pointer arguments from generic pointers to global ones.
7714   llvm::Type *coerceKernelArgumentType(llvm::Type *Ty, unsigned FromAS,
7715 unsigned ToAS) const {
7716 // Structure types.
7717 if (auto STy = dyn_cast<llvm::StructType>(Ty)) {
7718 SmallVector<llvm::Type *, 8> EltTys;
7719 bool Changed = false;
7720 for (auto T : STy->elements()) {
7721 auto NT = coerceKernelArgumentType(T, FromAS, ToAS);
7722 EltTys.push_back(NT);
7723 Changed |= (NT != T);
7724 }
7725 // Skip if there is no change in element types.
7726 if (!Changed)
7727 return STy;
7728 if (STy->hasName())
7729 return llvm::StructType::create(
7730 EltTys, (STy->getName() + ".coerce").str(), STy->isPacked());
7731 return llvm::StructType::get(getVMContext(), EltTys, STy->isPacked());
7732 }
7733     // Array types.
7734 if (auto ATy = dyn_cast<llvm::ArrayType>(Ty)) {
7735 auto T = ATy->getElementType();
7736 auto NT = coerceKernelArgumentType(T, FromAS, ToAS);
7737 // Skip if there is no change in that element type.
7738 if (NT == T)
7739 return ATy;
7740 return llvm::ArrayType::get(NT, ATy->getNumElements());
7741 }
7742 // Single value types.
7743 if (Ty->isPointerTy() && Ty->getPointerAddressSpace() == FromAS)
7744 return llvm::PointerType::get(
7745 cast<llvm::PointerType>(Ty)->getElementType(), ToAS);
7746 return Ty;
7747 }
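  // For illustration (not in the original source; assumes the usual amdgcn
  // numbering, generic == 0 and global == 1): a HIP kernel argument of type
  // { i32*, float } is rewritten to { i32 addrspace(1)*, float }; if the
  // struct type was named, the rewritten type gets a ".coerce" suffix.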
7748
7749 public:
7750   explicit AMDGPUABIInfo(CodeGen::CodeGenTypes &CGT) :
7751 DefaultABIInfo(CGT) {}
7752
7753 ABIArgInfo classifyReturnType(QualType RetTy) const;
7754 ABIArgInfo classifyKernelArgumentType(QualType Ty) const;
7755 ABIArgInfo classifyArgumentType(QualType Ty, unsigned &NumRegsLeft) const;
7756
7757 void computeInfo(CGFunctionInfo &FI) const override;
7758 Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
7759 QualType Ty) const override;
7760 };
7761
7762 bool AMDGPUABIInfo::isHomogeneousAggregateBaseType(QualType Ty) const {
7763 return true;
7764 }
7765
7766 bool AMDGPUABIInfo::isHomogeneousAggregateSmallEnough(
7767 const Type *Base, uint64_t Members) const {
7768 uint32_t NumRegs = (getContext().getTypeSize(Base) + 31) / 32;
7769
7770 // Homogeneous Aggregates may occupy at most 16 registers.
7771 return Members * NumRegs <= MaxNumRegsForArgsRet;
7772 }
7773
7774 /// Estimate number of registers the type will use when passed in registers.
7775 unsigned AMDGPUABIInfo::numRegsForType(QualType Ty) const {
7776 unsigned NumRegs = 0;
7777
7778 if (const VectorType *VT = Ty->getAs<VectorType>()) {
7779 // Compute from the number of elements. The reported size is based on the
7780 // in-memory size, which includes the padding 4th element for 3-vectors.
7781 QualType EltTy = VT->getElementType();
7782 unsigned EltSize = getContext().getTypeSize(EltTy);
7783
7784 // 16-bit element vectors should be passed as packed.
7785 if (EltSize == 16)
7786 return (VT->getNumElements() + 1) / 2;
7787
7788 unsigned EltNumRegs = (EltSize + 31) / 32;
7789 return EltNumRegs * VT->getNumElements();
7790 }
7791
7792 if (const RecordType *RT = Ty->getAs<RecordType>()) {
7793 const RecordDecl *RD = RT->getDecl();
7794 assert(!RD->hasFlexibleArrayMember());
7795
7796 for (const FieldDecl *Field : RD->fields()) {
7797 QualType FieldTy = Field->getType();
7798 NumRegs += numRegsForType(FieldTy);
7799 }
7800
7801 return NumRegs;
7802 }
7803
7804 return (getContext().getTypeSize(Ty) + 31) / 32;
7805 }
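// For illustration (not in the original source): <4 x half> packs two
// elements per register, so it needs (4 + 1) / 2 == 2 registers; <3 x float>
// needs 3 (one per element, ignoring the padded 4th lane the in-memory size
// would imply); 'struct { int a; float b; }' needs 1 + 1 == 2.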
7806
7807 void AMDGPUABIInfo::computeInfo(CGFunctionInfo &FI) const {
7808 llvm::CallingConv::ID CC = FI.getCallingConvention();
7809
7810 if (!getCXXABI().classifyReturnType(FI))
7811 FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
7812
7813 unsigned NumRegsLeft = MaxNumRegsForArgsRet;
7814 for (auto &Arg : FI.arguments()) {
7815 if (CC == llvm::CallingConv::AMDGPU_KERNEL) {
7816 Arg.info = classifyKernelArgumentType(Arg.type);
7817 } else {
7818 Arg.info = classifyArgumentType(Arg.type, NumRegsLeft);
7819 }
7820 }
7821 }
7822
7823 Address AMDGPUABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
7824 QualType Ty) const {
7825 llvm_unreachable("AMDGPU does not support varargs");
7826 }
7827
7828 ABIArgInfo AMDGPUABIInfo::classifyReturnType(QualType RetTy) const {
7829 if (isAggregateTypeForABI(RetTy)) {
7830 // Records with non-trivial destructors/copy-constructors should not be
7831 // returned by value.
7832 if (!getRecordArgABI(RetTy, getCXXABI())) {
7833 // Ignore empty structs/unions.
7834 if (isEmptyRecord(getContext(), RetTy, true))
7835 return ABIArgInfo::getIgnore();
7836
7837 // Lower single-element structs to just return a regular value.
7838 if (const Type *SeltTy = isSingleElementStruct(RetTy, getContext()))
7839 return ABIArgInfo::getDirect(CGT.ConvertType(QualType(SeltTy, 0)));
7840
7841 if (const RecordType *RT = RetTy->getAs<RecordType>()) {
7842 const RecordDecl *RD = RT->getDecl();
7843 if (RD->hasFlexibleArrayMember())
7844 return DefaultABIInfo::classifyReturnType(RetTy);
7845 }
7846
7847 // Pack aggregates <= 4 bytes into single VGPR or pair.
7848 uint64_t Size = getContext().getTypeSize(RetTy);
7849 if (Size <= 16)
7850 return ABIArgInfo::getDirect(llvm::Type::getInt16Ty(getVMContext()));
7851
7852 if (Size <= 32)
7853 return ABIArgInfo::getDirect(llvm::Type::getInt32Ty(getVMContext()));
7854
7855 if (Size <= 64) {
7856 llvm::Type *I32Ty = llvm::Type::getInt32Ty(getVMContext());
7857 return ABIArgInfo::getDirect(llvm::ArrayType::get(I32Ty, 2));
7858 }
7859
7860 if (numRegsForType(RetTy) <= MaxNumRegsForArgsRet)
7861 return ABIArgInfo::getDirect();
7862 }
7863 }
7864
7865 // Otherwise just do the default thing.
7866 return DefaultABIInfo::classifyReturnType(RetTy);
7867 }
7868
7869 /// For kernels all parameters are really passed in a special buffer. It doesn't
7870 /// make sense to pass anything byval, so everything must be direct.
7871 ABIArgInfo AMDGPUABIInfo::classifyKernelArgumentType(QualType Ty) const {
7872 Ty = useFirstFieldIfTransparentUnion(Ty);
7873
7874 // TODO: Can we omit empty structs?
7875
7876 llvm::Type *LTy = nullptr;
7877 if (const Type *SeltTy = isSingleElementStruct(Ty, getContext()))
7878 LTy = CGT.ConvertType(QualType(SeltTy, 0));
7879
7880 if (getContext().getLangOpts().HIP) {
7881 if (!LTy)
7882 LTy = CGT.ConvertType(Ty);
7883 LTy = coerceKernelArgumentType(
7884 LTy, /*FromAS=*/getContext().getTargetAddressSpace(LangAS::Default),
7885 /*ToAS=*/getContext().getTargetAddressSpace(LangAS::cuda_device));
7886 }
7887
7888 // If we set CanBeFlattened to true, CodeGen will expand the struct to its
7889 // individual elements, which confuses the Clover OpenCL backend; therefore we
7890 // have to set it to false here. Other args of getDirect() are just defaults.
7891 return ABIArgInfo::getDirect(LTy, 0, nullptr, false);
7892 }
7893
7894 ABIArgInfo AMDGPUABIInfo::classifyArgumentType(QualType Ty,
7895 unsigned &NumRegsLeft) const {
7896 assert(NumRegsLeft <= MaxNumRegsForArgsRet && "register estimate underflow");
7897
7898 Ty = useFirstFieldIfTransparentUnion(Ty);
7899
7900 if (isAggregateTypeForABI(Ty)) {
7901 // Records with non-trivial destructors/copy-constructors should not be
7902 // passed by value.
7903 if (auto RAA = getRecordArgABI(Ty, getCXXABI()))
7904 return getNaturalAlignIndirect(Ty, RAA == CGCXXABI::RAA_DirectInMemory);
7905
7906 // Ignore empty structs/unions.
7907 if (isEmptyRecord(getContext(), Ty, true))
7908 return ABIArgInfo::getIgnore();
7909
7910 // Lower single-element structs to just pass a regular value. TODO: We
7911 // could do reasonable-size multiple-element structs too, using getExpand(),
7912 // though watch out for things like bitfields.
7913 if (const Type *SeltTy = isSingleElementStruct(Ty, getContext()))
7914 return ABIArgInfo::getDirect(CGT.ConvertType(QualType(SeltTy, 0)));
7915
7916 if (const RecordType *RT = Ty->getAs<RecordType>()) {
7917 const RecordDecl *RD = RT->getDecl();
7918 if (RD->hasFlexibleArrayMember())
7919 return DefaultABIInfo::classifyArgumentType(Ty);
7920 }
7921
7922 // Pack aggregates <= 8 bytes into single VGPR or pair.
7923 uint64_t Size = getContext().getTypeSize(Ty);
7924 if (Size <= 64) {
7925 unsigned NumRegs = (Size + 31) / 32;
7926 NumRegsLeft -= std::min(NumRegsLeft, NumRegs);
7927
7928 if (Size <= 16)
7929 return ABIArgInfo::getDirect(llvm::Type::getInt16Ty(getVMContext()));
7930
7931 if (Size <= 32)
7932 return ABIArgInfo::getDirect(llvm::Type::getInt32Ty(getVMContext()));
7933
7934 // XXX: Should this be i64 instead, and should the limit increase?
7935 llvm::Type *I32Ty = llvm::Type::getInt32Ty(getVMContext());
7936 return ABIArgInfo::getDirect(llvm::ArrayType::get(I32Ty, 2));
7937 }
7938
7939 if (NumRegsLeft > 0) {
7940 unsigned NumRegs = numRegsForType(Ty);
7941 if (NumRegsLeft >= NumRegs) {
7942 NumRegsLeft -= NumRegs;
7943 return ABIArgInfo::getDirect();
7944 }
7945 }
7946 }
7947
7948 // Otherwise just do the default thing.
7949 ABIArgInfo ArgInfo = DefaultABIInfo::classifyArgumentType(Ty);
7950 if (!ArgInfo.isIndirect()) {
7951 unsigned NumRegs = numRegsForType(Ty);
7952 NumRegsLeft -= std::min(NumRegs, NumRegsLeft);
7953 }
7954
7955 return ArgInfo;
7956 }
7957
7958 class AMDGPUTargetCodeGenInfo : public TargetCodeGenInfo {
7959 public:
7960   AMDGPUTargetCodeGenInfo(CodeGenTypes &CGT)
7961 : TargetCodeGenInfo(new AMDGPUABIInfo(CGT)) {}
7962 void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
7963 CodeGen::CodeGenModule &M) const override;
7964 unsigned getOpenCLKernelCallingConv() const override;
7965
7966 llvm::Constant *getNullPointer(const CodeGen::CodeGenModule &CGM,
7967 llvm::PointerType *T, QualType QT) const override;
7968
7969   LangAS getASTAllocaAddressSpace() const override {
7970 return getLangASFromTargetAS(
7971 getABIInfo().getDataLayout().getAllocaAddrSpace());
7972 }
7973 LangAS getGlobalVarAddressSpace(CodeGenModule &CGM,
7974 const VarDecl *D) const override;
7975 llvm::SyncScope::ID getLLVMSyncScopeID(const LangOptions &LangOpts,
7976 SyncScope Scope,
7977 llvm::AtomicOrdering Ordering,
7978 llvm::LLVMContext &Ctx) const override;
7979 llvm::Function *
7980 createEnqueuedBlockKernel(CodeGenFunction &CGF,
7981 llvm::Function *BlockInvokeFunc,
7982 llvm::Value *BlockLiteral) const override;
7983 bool shouldEmitStaticExternCAliases() const override;
7984 void setCUDAKernelCallingConvention(const FunctionType *&FT) const override;
7985 };
7986 }
7987
7988 static bool requiresAMDGPUProtectedVisibility(const Decl *D,
7989 llvm::GlobalValue *GV) {
7990 if (GV->getVisibility() != llvm::GlobalValue::HiddenVisibility)
7991 return false;
7992
7993 return D->hasAttr<OpenCLKernelAttr>() ||
7994 (isa<FunctionDecl>(D) && D->hasAttr<CUDAGlobalAttr>()) ||
7995 (isa<VarDecl>(D) &&
7996 (D->hasAttr<CUDADeviceAttr>() || D->hasAttr<CUDAConstantAttr>() ||
7997 D->hasAttr<HIPPinnedShadowAttr>()));
7998 }
7999
8000 static bool requiresAMDGPUDefaultVisibility(const Decl *D,
8001 llvm::GlobalValue *GV) {
8002 if (GV->getVisibility() != llvm::GlobalValue::HiddenVisibility)
8003 return false;
8004
8005 return isa<VarDecl>(D) && D->hasAttr<HIPPinnedShadowAttr>();
8006 }
8007
8008 void AMDGPUTargetCodeGenInfo::setTargetAttributes(
8009 const Decl *D, llvm::GlobalValue *GV, CodeGen::CodeGenModule &M) const {
8010 if (requiresAMDGPUDefaultVisibility(D, GV)) {
8011 GV->setVisibility(llvm::GlobalValue::DefaultVisibility);
8012 GV->setDSOLocal(false);
8013 } else if (requiresAMDGPUProtectedVisibility(D, GV)) {
8014 GV->setVisibility(llvm::GlobalValue::ProtectedVisibility);
8015 GV->setDSOLocal(true);
8016 }
8017
8018 if (GV->isDeclaration())
8019 return;
8020 const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D);
8021 if (!FD)
8022 return;
8023
8024 llvm::Function *F = cast<llvm::Function>(GV);
8025
8026 const auto *ReqdWGS = M.getLangOpts().OpenCL ?
8027 FD->getAttr<ReqdWorkGroupSizeAttr>() : nullptr;
8028
8030 const bool IsOpenCLKernel = M.getLangOpts().OpenCL &&
8031 FD->hasAttr<OpenCLKernelAttr>();
8032 const bool IsHIPKernel = M.getLangOpts().HIP &&
8033 FD->hasAttr<CUDAGlobalAttr>();
8034 if ((IsOpenCLKernel || IsHIPKernel) &&
8035 (M.getTriple().getOS() == llvm::Triple::AMDHSA))
8036 F->addFnAttr("amdgpu-implicitarg-num-bytes", "56");
8037
8038 const auto *FlatWGS = FD->getAttr<AMDGPUFlatWorkGroupSizeAttr>();
8039 if (ReqdWGS || FlatWGS) {
8040 unsigned Min = 0;
8041 unsigned Max = 0;
8042 if (FlatWGS) {
8043 Min = FlatWGS->getMin()
8044 ->EvaluateKnownConstInt(M.getContext())
8045 .getExtValue();
8046 Max = FlatWGS->getMax()
8047 ->EvaluateKnownConstInt(M.getContext())
8048 .getExtValue();
8049 }
8050 if (ReqdWGS && Min == 0 && Max == 0)
8051 Min = Max = ReqdWGS->getXDim() * ReqdWGS->getYDim() * ReqdWGS->getZDim();
8052
8053 if (Min != 0) {
8054       assert(Min <= Max && "Min must be less than or equal to Max");
8055
8056 std::string AttrVal = llvm::utostr(Min) + "," + llvm::utostr(Max);
8057 F->addFnAttr("amdgpu-flat-work-group-size", AttrVal);
8058 } else
8059 assert(Max == 0 && "Max must be zero");
8060 } else if (IsOpenCLKernel || IsHIPKernel) {
8061 // By default, restrict the maximum size to a value specified by
8062 // --gpu-max-threads-per-block=n or its default value.
8063 std::string AttrVal =
8064 std::string("1,") + llvm::utostr(M.getLangOpts().GPUMaxThreadsPerBlock);
8065 F->addFnAttr("amdgpu-flat-work-group-size", AttrVal);
8066 }
8067
8068 if (const auto *Attr = FD->getAttr<AMDGPUWavesPerEUAttr>()) {
8069 unsigned Min =
8070 Attr->getMin()->EvaluateKnownConstInt(M.getContext()).getExtValue();
8071 unsigned Max = Attr->getMax() ? Attr->getMax()
8072 ->EvaluateKnownConstInt(M.getContext())
8073 .getExtValue()
8074 : 0;
8075
8076 if (Min != 0) {
8077       assert((Max == 0 || Min <= Max) && "Min must be less than or equal to Max");
8078
8079 std::string AttrVal = llvm::utostr(Min);
8080 if (Max != 0)
8081 AttrVal = AttrVal + "," + llvm::utostr(Max);
8082 F->addFnAttr("amdgpu-waves-per-eu", AttrVal);
8083 } else
8084 assert(Max == 0 && "Max must be zero");
8085 }
8086
8087 if (const auto *Attr = FD->getAttr<AMDGPUNumSGPRAttr>()) {
8088 unsigned NumSGPR = Attr->getNumSGPR();
8089
8090 if (NumSGPR != 0)
8091 F->addFnAttr("amdgpu-num-sgpr", llvm::utostr(NumSGPR));
8092 }
8093
8094 if (const auto *Attr = FD->getAttr<AMDGPUNumVGPRAttr>()) {
8095 uint32_t NumVGPR = Attr->getNumVGPR();
8096
8097 if (NumVGPR != 0)
8098 F->addFnAttr("amdgpu-num-vgpr", llvm::utostr(NumVGPR));
8099 }
8100 }
8101
8102 unsigned AMDGPUTargetCodeGenInfo::getOpenCLKernelCallingConv() const {
8103 return llvm::CallingConv::AMDGPU_KERNEL;
8104 }
8105
8106 // Currently LLVM assumes null pointers always have value 0,
8107 // which results in incorrectly transformed IR. Therefore, instead of
8108 // emitting null pointers in private and local address spaces, a null
8109 // pointer in the generic address space is emitted and then cast to a
8110 // pointer in the local or private address space.
8111 llvm::Constant *AMDGPUTargetCodeGenInfo::getNullPointer(
8112 const CodeGen::CodeGenModule &CGM, llvm::PointerType *PT,
8113 QualType QT) const {
8114 if (CGM.getContext().getTargetNullPointerValue(QT) == 0)
8115 return llvm::ConstantPointerNull::get(PT);
8116
8117 auto &Ctx = CGM.getContext();
8118 auto NPT = llvm::PointerType::get(PT->getElementType(),
8119 Ctx.getTargetAddressSpace(LangAS::opencl_generic));
8120 return llvm::ConstantExpr::getAddrSpaceCast(
8121 llvm::ConstantPointerNull::get(NPT), PT);
8122 }
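// For illustration (not in the original source; assumes the usual amdgcn
// numbering, generic == 0 and private == 5): a null 'private int *' whose
// target null value is nonzero is emitted as
//   addrspacecast (i32* null to i32 addrspace(5)*)
// rather than as a literal zero in address space 5.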
8123
8124 LangAS
8125 AMDGPUTargetCodeGenInfo::getGlobalVarAddressSpace(CodeGenModule &CGM,
8126 const VarDecl *D) const {
8127 assert(!CGM.getLangOpts().OpenCL &&
8128 !(CGM.getLangOpts().CUDA && CGM.getLangOpts().CUDAIsDevice) &&
8129 "Address space agnostic languages only");
8130 LangAS DefaultGlobalAS = getLangASFromTargetAS(
8131 CGM.getContext().getTargetAddressSpace(LangAS::opencl_global));
8132 if (!D)
8133 return DefaultGlobalAS;
8134
8135 LangAS AddrSpace = D->getType().getAddressSpace();
8136 assert(AddrSpace == LangAS::Default || isTargetAddressSpace(AddrSpace));
8137 if (AddrSpace != LangAS::Default)
8138 return AddrSpace;
8139
8140 if (CGM.isTypeConstant(D->getType(), false)) {
8141 if (auto ConstAS = CGM.getTarget().getConstantAddressSpace())
8142 return ConstAS.getValue();
8143 }
8144 return DefaultGlobalAS;
8145 }
8146
8147 llvm::SyncScope::ID
8148 AMDGPUTargetCodeGenInfo::getLLVMSyncScopeID(const LangOptions &LangOpts,
8149 SyncScope Scope,
8150 llvm::AtomicOrdering Ordering,
8151 llvm::LLVMContext &Ctx) const {
8152 std::string Name;
8153 switch (Scope) {
8154 case SyncScope::OpenCLWorkGroup:
8155 Name = "workgroup";
8156 break;
8157 case SyncScope::OpenCLDevice:
8158 Name = "agent";
8159 break;
8160 case SyncScope::OpenCLAllSVMDevices:
8161 Name = "";
8162 break;
8163 case SyncScope::OpenCLSubGroup:
8164 Name = "wavefront";
8165 }
8166
8167 if (Ordering != llvm::AtomicOrdering::SequentiallyConsistent) {
8168 if (!Name.empty())
8169 Name = Twine(Twine(Name) + Twine("-")).str();
8170
8171 Name = Twine(Twine(Name) + Twine("one-as")).str();
8172 }
8173
8174 return Ctx.getOrInsertSyncScopeID(Name);
8175 }
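// For illustration (not in the original source): (OpenCLWorkGroup, acquire)
// maps to "workgroup-one-as", (OpenCLDevice, seq_cst) maps to "agent", and
// (OpenCLAllSVMDevices, monotonic) maps to "one-as" (the empty scope name
// plus the suffix).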
8176
8177 bool AMDGPUTargetCodeGenInfo::shouldEmitStaticExternCAliases() const {
8178 return false;
8179 }
8180
8181 void AMDGPUTargetCodeGenInfo::setCUDAKernelCallingConvention(
8182 const FunctionType *&FT) const {
8183 FT = getABIInfo().getContext().adjustFunctionType(
8184 FT, FT->getExtInfo().withCallingConv(CC_OpenCLKernel));
8185 }
8186
8187 //===----------------------------------------------------------------------===//
8188 // SPARC v8 ABI Implementation.
8189 // Based on the SPARC Compliance Definition version 2.4.1.
8190 //
8191 // Ensures that complex values are passed in registers.
8192 //
8193 namespace {
8194 class SparcV8ABIInfo : public DefaultABIInfo {
8195 public:
8196   SparcV8ABIInfo(CodeGenTypes &CGT) : DefaultABIInfo(CGT) {}
8197
8198 private:
8199 ABIArgInfo classifyReturnType(QualType RetTy) const;
8200 void computeInfo(CGFunctionInfo &FI) const override;
8201 };
8202 } // end anonymous namespace
8203
8204
8205 ABIArgInfo
8206 SparcV8ABIInfo::classifyReturnType(QualType Ty) const {
8207 if (Ty->isAnyComplexType()) {
8208 return ABIArgInfo::getDirect();
8209 }
8210 else {
8211 return DefaultABIInfo::classifyReturnType(Ty);
8212 }
8213 }
8214
8215 void SparcV8ABIInfo::computeInfo(CGFunctionInfo &FI) const {
8216
8217 FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
8218 for (auto &Arg : FI.arguments())
8219 Arg.info = classifyArgumentType(Arg.type);
8220 }
8221
8222 namespace {
8223 class SparcV8TargetCodeGenInfo : public TargetCodeGenInfo {
8224 public:
8225   SparcV8TargetCodeGenInfo(CodeGenTypes &CGT)
8226 : TargetCodeGenInfo(new SparcV8ABIInfo(CGT)) {}
8227 };
8228 } // end anonymous namespace
8229
8230 //===----------------------------------------------------------------------===//
8231 // SPARC v9 ABI Implementation.
8232 // Based on the SPARC Compliance Definition version 2.4.1.
8233 //
8234 // Function arguments are mapped to a nominal "parameter array" and promoted
8235 // to registers depending on their type. Each argument occupies 8 or 16 bytes
8236 // in the array; structs larger than 16 bytes are passed indirectly.
8237 //
8238 // One case requires special care:
8239 //
8240 // struct mixed {
8241 // int i;
8242 // float f;
8243 // };
8244 //
8245 // When a struct mixed is passed by value, it only occupies 8 bytes in the
8246 // parameter array, but the int is passed in an integer register, and the float
8247 // is passed in a floating point register. This is represented as two arguments
8248 // with the LLVM IR inreg attribute:
8249 //
8250 // declare void f(i32 inreg %i, float inreg %f)
8251 //
8252 // The code generator will only allocate 4 bytes from the parameter array for
8253 // the inreg arguments. All other arguments are allocated a multiple of 8
8254 // bytes.
8255 //
8256 namespace {
8257 class SparcV9ABIInfo : public ABIInfo {
8258 public:
8259   SparcV9ABIInfo(CodeGenTypes &CGT) : ABIInfo(CGT) {}
8260
8261 private:
8262 ABIArgInfo classifyType(QualType RetTy, unsigned SizeLimit) const;
8263 void computeInfo(CGFunctionInfo &FI) const override;
8264 Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
8265 QualType Ty) const override;
8266
8267 // Coercion type builder for structs passed in registers. The coercion type
8268 // serves two purposes:
8269 //
8270 // 1. Pad structs to a multiple of 64 bits, so they are passed 'left-aligned'
8271 // in registers.
8272 // 2. Expose aligned floating point elements as first-level elements, so the
8273 // code generator knows to pass them in floating point registers.
8274 //
8275 // We also compute the InReg flag which indicates that the struct contains
8276 // aligned 32-bit floats.
8277 //
8278 struct CoerceBuilder {
8279 llvm::LLVMContext &Context;
8280 const llvm::DataLayout &DL;
8281 SmallVector<llvm::Type*, 8> Elems;
8282 uint64_t Size;
8283 bool InReg;
8284
8285     CoerceBuilder(llvm::LLVMContext &c, const llvm::DataLayout &dl)
8286 : Context(c), DL(dl), Size(0), InReg(false) {}
8287
8288 // Pad Elems with integers until Size is ToSize.
8289     void pad(uint64_t ToSize) {
8290 assert(ToSize >= Size && "Cannot remove elements");
8291 if (ToSize == Size)
8292 return;
8293
8294 // Finish the current 64-bit word.
8295 uint64_t Aligned = llvm::alignTo(Size, 64);
8296 if (Aligned > Size && Aligned <= ToSize) {
8297 Elems.push_back(llvm::IntegerType::get(Context, Aligned - Size));
8298 Size = Aligned;
8299 }
8300
8301 // Add whole 64-bit words.
8302 while (Size + 64 <= ToSize) {
8303 Elems.push_back(llvm::Type::getInt64Ty(Context));
8304 Size += 64;
8305 }
8306
8307 // Final in-word padding.
8308 if (Size < ToSize) {
8309 Elems.push_back(llvm::IntegerType::get(Context, ToSize - Size));
8310 Size = ToSize;
8311 }
8312 }
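    // For illustration (not in the original source): with Size == 32,
    // pad(192) first finishes the current word with an i32 (Size -> 64),
    // then appends two whole i64s (Size -> 192); the final in-word padding
    // branch is not needed in this case.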
8313
8314 // Add a floating point element at Offset.
8315     void addFloat(uint64_t Offset, llvm::Type *Ty, unsigned Bits) {
8316 // Unaligned floats are treated as integers.
8317 if (Offset % Bits)
8318 return;
8319 // The InReg flag is only required if there are any floats < 64 bits.
8320 if (Bits < 64)
8321 InReg = true;
8322 pad(Offset);
8323 Elems.push_back(Ty);
8324 Size = Offset + Bits;
8325 }
8326
8327 // Add a struct type to the coercion type, starting at Offset (in bits).
8328     void addStruct(uint64_t Offset, llvm::StructType *StrTy) {
8329 const llvm::StructLayout *Layout = DL.getStructLayout(StrTy);
8330 for (unsigned i = 0, e = StrTy->getNumElements(); i != e; ++i) {
8331 llvm::Type *ElemTy = StrTy->getElementType(i);
8332 uint64_t ElemOffset = Offset + Layout->getElementOffsetInBits(i);
8333 switch (ElemTy->getTypeID()) {
8334 case llvm::Type::StructTyID:
8335 addStruct(ElemOffset, cast<llvm::StructType>(ElemTy));
8336 break;
8337 case llvm::Type::FloatTyID:
8338 addFloat(ElemOffset, ElemTy, 32);
8339 break;
8340 case llvm::Type::DoubleTyID:
8341 addFloat(ElemOffset, ElemTy, 64);
8342 break;
8343 case llvm::Type::FP128TyID:
8344 addFloat(ElemOffset, ElemTy, 128);
8345 break;
8346 case llvm::Type::PointerTyID:
8347 if (ElemOffset % 64 == 0) {
8348 pad(ElemOffset);
8349 Elems.push_back(ElemTy);
8350 Size += 64;
8351 }
8352 break;
8353 default:
8354 break;
8355 }
8356 }
8357 }
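    // Worked example (annotation, not in the original source), using
    // 'struct mixed { int i; float f; }' from the comment above: the i32
    // element hits the default case and is skipped, then addFloat(32, float,
    // 32) sets InReg, pads with an i32, and appends the float, producing the
    // coercion type { i32, float } that yields the two inreg arguments.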
8358
8359 // Check if Ty is a usable substitute for the coercion type.
8360     bool isUsableType(llvm::StructType *Ty) const {
8361 return llvm::makeArrayRef(Elems) == Ty->elements();
8362 }
8363
8364 // Get the coercion type as a literal struct type.
8365     llvm::Type *getType() const {
8366 if (Elems.size() == 1)
8367 return Elems.front();
8368 else
8369 return llvm::StructType::get(Context, Elems);
8370 }
8371 };
8372 };
8373 } // end anonymous namespace
8374
8375 ABIArgInfo
8376 SparcV9ABIInfo::classifyType(QualType Ty, unsigned SizeLimit) const {
8377 if (Ty->isVoidType())
8378 return ABIArgInfo::getIgnore();
8379
8380 uint64_t Size = getContext().getTypeSize(Ty);
8381
8382 // Anything too big to fit in registers is passed with an explicit indirect
8383 // pointer / sret pointer.
8384 if (Size > SizeLimit)
8385 return getNaturalAlignIndirect(Ty, /*ByVal=*/false);
8386
8387 // Treat an enum type as its underlying type.
8388 if (const EnumType *EnumTy = Ty->getAs<EnumType>())
8389 Ty = EnumTy->getDecl()->getIntegerType();
8390
8391 // Integer types smaller than a register are extended.
8392 if (Size < 64 && Ty->isIntegerType())
8393 return ABIArgInfo::getExtend(Ty);
8394
8395 // Other non-aggregates go in registers.
8396 if (!isAggregateTypeForABI(Ty))
8397 return ABIArgInfo::getDirect();
8398
8399 // If a C++ object has either a non-trivial copy constructor or a non-trivial
8400 // destructor, it is passed with an explicit indirect pointer / sret pointer.
8401 if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, getCXXABI()))
8402 return getNaturalAlignIndirect(Ty, RAA == CGCXXABI::RAA_DirectInMemory);
8403
8404 // This is a small aggregate type that should be passed in registers.
8405 // Build a coercion type from the LLVM struct type.
8406 llvm::StructType *StrTy = dyn_cast<llvm::StructType>(CGT.ConvertType(Ty));
8407 if (!StrTy)
8408 return ABIArgInfo::getDirect();
8409
8410 CoerceBuilder CB(getVMContext(), getDataLayout());
8411 CB.addStruct(0, StrTy);
8412 CB.pad(llvm::alignTo(CB.DL.getTypeSizeInBits(StrTy), 64));
8413
8414 // Try to use the original type for coercion.
8415 llvm::Type *CoerceTy = CB.isUsableType(StrTy) ? StrTy : CB.getType();
8416
8417 if (CB.InReg)
8418 return ABIArgInfo::getDirectInReg(CoerceTy);
8419 else
8420 return ABIArgInfo::getDirect(CoerceTy);
8421 }
8422
8423 Address SparcV9ABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
8424 QualType Ty) const {
8425 ABIArgInfo AI = classifyType(Ty, 16 * 8);
8426 llvm::Type *ArgTy = CGT.ConvertType(Ty);
8427 if (AI.canHaveCoerceToType() && !AI.getCoerceToType())
8428 AI.setCoerceToType(ArgTy);
8429
8430 CharUnits SlotSize = CharUnits::fromQuantity(8);
8431
8432 CGBuilderTy &Builder = CGF.Builder;
8433 Address Addr(Builder.CreateLoad(VAListAddr, "ap.cur"), SlotSize);
8434 llvm::Type *ArgPtrTy = llvm::PointerType::getUnqual(ArgTy);
8435
8436 auto TypeInfo = getContext().getTypeInfoInChars(Ty);
8437
8438 Address ArgAddr = Address::invalid();
8439 CharUnits Stride;
8440 switch (AI.getKind()) {
8441 case ABIArgInfo::Expand:
8442 case ABIArgInfo::CoerceAndExpand:
8443 case ABIArgInfo::InAlloca:
8444 llvm_unreachable("Unsupported ABI kind for va_arg");
8445
8446 case ABIArgInfo::Extend: {
8447 Stride = SlotSize;
8448 CharUnits Offset = SlotSize - TypeInfo.first;
8449 ArgAddr = Builder.CreateConstInBoundsByteGEP(Addr, Offset, "extend");
8450 break;
8451 }
8452
8453 case ABIArgInfo::Direct: {
8454 auto AllocSize = getDataLayout().getTypeAllocSize(AI.getCoerceToType());
8455 Stride = CharUnits::fromQuantity(AllocSize).alignTo(SlotSize);
8456 ArgAddr = Addr;
8457 break;
8458 }
8459
8460 case ABIArgInfo::Indirect:
8461 Stride = SlotSize;
8462 ArgAddr = Builder.CreateElementBitCast(Addr, ArgPtrTy, "indirect");
8463 ArgAddr = Address(Builder.CreateLoad(ArgAddr, "indirect.arg"),
8464 TypeInfo.second);
8465 break;
8466
8467 case ABIArgInfo::Ignore:
8468 return Address(llvm::UndefValue::get(ArgPtrTy), TypeInfo.second);
8469 }
8470
8471 // Update VAList.
8472 Address NextPtr = Builder.CreateConstInBoundsByteGEP(Addr, Stride, "ap.next");
8473 Builder.CreateStore(NextPtr.getPointer(), VAListAddr);
8474
8475 return Builder.CreateBitCast(ArgAddr, ArgPtrTy, "arg.addr");
8476 }
8477
8478 void SparcV9ABIInfo::computeInfo(CGFunctionInfo &FI) const {
8479 FI.getReturnInfo() = classifyType(FI.getReturnType(), 32 * 8);
8480 for (auto &I : FI.arguments())
8481 I.info = classifyType(I.type, 16 * 8);
8482 }
8483
8484 namespace {
8485 class SparcV9TargetCodeGenInfo : public TargetCodeGenInfo {
8486 public:
8487   SparcV9TargetCodeGenInfo(CodeGenTypes &CGT)
8488 : TargetCodeGenInfo(new SparcV9ABIInfo(CGT)) {}
8489
8490   int getDwarfEHStackPointer(CodeGen::CodeGenModule &M) const override {
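    // On SPARC, register 14 is %o6, the stack pointer (%sp).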
8491 return 14;
8492 }
8493
8494 bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
8495 llvm::Value *Address) const override;
8496 };
8497 } // end anonymous namespace
8498
8499 bool
8500 SparcV9TargetCodeGenInfo::initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
8501 llvm::Value *Address) const {
8502 // This is calculated from the LLVM and GCC tables and verified
8503 // against gcc output. AFAIK all ABIs use the same encoding.
8504
8505 CodeGen::CGBuilderTy &Builder = CGF.Builder;
8506
8507 llvm::IntegerType *i8 = CGF.Int8Ty;
8508 llvm::Value *Four8 = llvm::ConstantInt::get(i8, 4);
8509 llvm::Value *Eight8 = llvm::ConstantInt::get(i8, 8);
8510
8511 // 0-31: the 8-byte general-purpose registers
8512 AssignToArrayRange(Builder, Address, Eight8, 0, 31);
8513
8514 // 32-63: f0-31, the 4-byte floating-point registers
8515 AssignToArrayRange(Builder, Address, Four8, 32, 63);
8516
8517 // Y = 64
8518 // PSR = 65
8519 // WIM = 66
8520 // TBR = 67
8521 // PC = 68
8522 // NPC = 69
8523 // FSR = 70
8524 // CSR = 71
8525 AssignToArrayRange(Builder, Address, Eight8, 64, 71);
8526
8527 // 72-87: d0-15, the 8-byte floating-point registers
8528 AssignToArrayRange(Builder, Address, Eight8, 72, 87);
8529
8530 return false;
8531 }
8532
8533 // ARC ABI implementation.
8534 namespace {
8535
8536 class ARCABIInfo : public DefaultABIInfo {
8537 public:
8538 using DefaultABIInfo::DefaultABIInfo;
8539
8540 private:
8541 Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
8542 QualType Ty) const override;
8543
8544   void updateState(const ABIArgInfo &Info, QualType Ty, CCState &State) const {
8545 if (!State.FreeRegs)
8546 return;
8547 if (Info.isIndirect() && Info.getInReg())
8548 State.FreeRegs--;
8549 else if (Info.isDirect() && Info.getInReg()) {
8550 unsigned sz = (getContext().getTypeSize(Ty) + 31) / 32;
8551 if (sz < State.FreeRegs)
8552 State.FreeRegs -= sz;
8553 else
8554 State.FreeRegs = 0;
8555 }
8556 }
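  // For illustration (not in the original source): starting from ARC's 8
  // free registers, a direct in-reg 64-bit value consumes
  // (64 + 31) / 32 == 2 of them, while an indirect in-reg result consumes a
  // single register for its pointer.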
8557
8558   void computeInfo(CGFunctionInfo &FI) const override {
8559 CCState State(FI);
8560 // ARC uses 8 registers to pass arguments.
8561 State.FreeRegs = 8;
8562
8563 if (!getCXXABI().classifyReturnType(FI))
8564 FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
8565 updateState(FI.getReturnInfo(), FI.getReturnType(), State);
8566 for (auto &I : FI.arguments()) {
8567 I.info = classifyArgumentType(I.type, State.FreeRegs);
8568 updateState(I.info, I.type, State);
8569 }
8570 }
8571
8572 ABIArgInfo getIndirectByRef(QualType Ty, bool HasFreeRegs) const;
8573 ABIArgInfo getIndirectByValue(QualType Ty) const;
8574 ABIArgInfo classifyArgumentType(QualType Ty, uint8_t FreeRegs) const;
8575 ABIArgInfo classifyReturnType(QualType RetTy) const;
8576 };
8577
8578 class ARCTargetCodeGenInfo : public TargetCodeGenInfo {
8579 public:
ARCTargetCodeGenInfo(CodeGenTypes & CGT)8580 ARCTargetCodeGenInfo(CodeGenTypes &CGT)
8581 : TargetCodeGenInfo(new ARCABIInfo(CGT)) {}
8582 };
8583
8584
getIndirectByRef(QualType Ty,bool HasFreeRegs) const8585 ABIArgInfo ARCABIInfo::getIndirectByRef(QualType Ty, bool HasFreeRegs) const {
8586 return HasFreeRegs ? getNaturalAlignIndirectInReg(Ty) :
8587 getNaturalAlignIndirect(Ty, false);
8588 }
8589
getIndirectByValue(QualType Ty) const8590 ABIArgInfo ARCABIInfo::getIndirectByValue(QualType Ty) const {
8591 // Compute the byval alignment.
8592 const unsigned MinABIStackAlignInBytes = 4;
8593 unsigned TypeAlign = getContext().getTypeAlign(Ty) / 8;
8594 return ABIArgInfo::getIndirect(CharUnits::fromQuantity(4), /*ByVal=*/true,
8595 TypeAlign > MinABIStackAlignInBytes);
8596 }
8597
EmitVAArg(CodeGenFunction & CGF,Address VAListAddr,QualType Ty) const8598 Address ARCABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
8599 QualType Ty) const {
8600 return emitVoidPtrVAArg(CGF, VAListAddr, Ty, /*indirect*/ false,
8601 getContext().getTypeInfoInChars(Ty),
8602 CharUnits::fromQuantity(4), true);
8603 }
8604
classifyArgumentType(QualType Ty,uint8_t FreeRegs) const8605 ABIArgInfo ARCABIInfo::classifyArgumentType(QualType Ty,
8606 uint8_t FreeRegs) const {
8607 // Handle the generic C++ ABI.
8608 const RecordType *RT = Ty->getAs<RecordType>();
8609 if (RT) {
8610 CGCXXABI::RecordArgABI RAA = getRecordArgABI(RT, getCXXABI());
8611 if (RAA == CGCXXABI::RAA_Indirect)
8612 return getIndirectByRef(Ty, FreeRegs > 0);
8613
8614 if (RAA == CGCXXABI::RAA_DirectInMemory)
8615 return getIndirectByValue(Ty);
8616 }
8617
8618 // Treat an enum type as its underlying type.
8619 if (const EnumType *EnumTy = Ty->getAs<EnumType>())
8620 Ty = EnumTy->getDecl()->getIntegerType();
8621
8622 auto SizeInRegs = llvm::alignTo(getContext().getTypeSize(Ty), 32) / 32;
8623
8624 if (isAggregateTypeForABI(Ty)) {
8625 // Structures with flexible arrays are always indirect.
8626 if (RT && RT->getDecl()->hasFlexibleArrayMember())
8627 return getIndirectByValue(Ty);
8628
8629 // Ignore empty structs/unions.
8630 if (isEmptyRecord(getContext(), Ty, true))
8631 return ABIArgInfo::getIgnore();
8632
8633 llvm::LLVMContext &LLVMContext = getVMContext();
8634
8635 llvm::IntegerType *Int32 = llvm::Type::getInt32Ty(LLVMContext);
8636 SmallVector<llvm::Type *, 3> Elements(SizeInRegs, Int32);
8637 llvm::Type *Result = llvm::StructType::get(LLVMContext, Elements);
8638
8639 return FreeRegs >= SizeInRegs ?
8640 ABIArgInfo::getDirectInReg(Result) :
8641 ABIArgInfo::getDirect(Result, 0, nullptr, false);
8642 }
8643
8644 return Ty->isPromotableIntegerType() ?
8645 (FreeRegs >= SizeInRegs ? ABIArgInfo::getExtendInReg(Ty) :
8646 ABIArgInfo::getExtend(Ty)) :
8647 (FreeRegs >= SizeInRegs ? ABIArgInfo::getDirectInReg() :
8648 ABIArgInfo::getDirect());
8649 }
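// Illustrative example (added commentary, not in the upstream source): an
// 8-byte struct flattens to { i32, i32 } above (SizeInRegs == 2), so it is
// passed as getDirectInReg({i32, i32}) while at least two of ARC's eight
// argument registers remain free, and as a plain direct value otherwise.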

ABIArgInfo ARCABIInfo::classifyReturnType(QualType RetTy) const {
  if (RetTy->isAnyComplexType())
    return ABIArgInfo::getDirectInReg();

  // Return values needing more than 4 registers are returned indirectly.
  auto RetSize = llvm::alignTo(getContext().getTypeSize(RetTy), 32) / 32;
  if (RetSize > 4)
    return getIndirectByRef(RetTy, /*HasFreeRegs*/ true);

  return DefaultABIInfo::classifyReturnType(RetTy);
}

} // End anonymous namespace.

//===----------------------------------------------------------------------===//
// XCore ABI Implementation
//===----------------------------------------------------------------------===//

namespace {

/// A SmallStringEnc instance is used to build up the TypeString by passing
/// it by reference between functions that append to it.
typedef llvm::SmallString<128> SmallStringEnc;

/// TypeStringCache caches the meta encodings of Types.
///
/// The reason for caching TypeStrings is two fold:
///   1. To cache a type's encoding for later uses;
///   2. As a means to break recursive member type inclusion.
///
/// A cache Entry can have a Status of:
///   NonRecursive:   The type encoding is not recursive;
///   Recursive:      The type encoding is recursive;
///   Incomplete:     An incomplete TypeString;
///   IncompleteUsed: An incomplete TypeString that has been used in a
///                   Recursive type encoding.
///
/// A NonRecursive entry will have all of its sub-members expanded as fully
/// as possible. Whilst it may contain types which are recursive, the type
/// itself is not recursive and thus its encoding may be safely used whenever
/// the type is encountered.
///
/// A Recursive entry will have all of its sub-members expanded as fully as
/// possible. The type itself is recursive and it may contain other types which
/// are recursive. The Recursive encoding must not be used during the expansion
/// of a recursive type's recursive branch. For simplicity the code uses
/// IncompleteCount to reject all usage of Recursive encodings for member types.
///
/// An Incomplete entry is always a RecordType and only encodes its
/// identifier e.g. "s(S){}". Incomplete 'StubEnc' entries are ephemeral and
/// are placed into the cache during type expansion as a means to identify and
/// handle recursive inclusion of types as sub-members. If there is recursion
/// the entry becomes IncompleteUsed.
///
/// During the expansion of a RecordType's members:
///
///   If the cache contains a NonRecursive encoding for the member type, the
///   cached encoding is used;
///
///   If the cache contains a Recursive encoding for the member type, the
///   cached encoding is 'Swapped' out, as it may be incorrect, and...
///
///   If the member is a RecordType, an Incomplete encoding is placed into the
///   cache to break potential recursive inclusion of itself as a sub-member;
///
///   Once a member RecordType has been expanded, its temporary incomplete
///   entry is removed from the cache. If a Recursive encoding was swapped out
///   it is swapped back in;
///
///   If an incomplete entry is used to expand a sub-member, the incomplete
///   entry is marked as IncompleteUsed. The cache keeps count of how many
///   IncompleteUsed entries it currently contains in IncompleteUsedCount;
///
///   If a member's encoding is found to be a NonRecursive or Recursive viz:
///   IncompleteUsedCount==0, the member's encoding is added to the cache.
///   Else the member is part of a recursive type and thus the recursion has
///   been exited too soon for the encoding to be correct for the member.
///
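/// Worked example (illustrative, not part of the upstream comment): for a
/// hypothetical self-referential C type
///
///   struct S { struct S *next; };
///
/// expanding 'S' first caches the Incomplete stub "s(S){}". Encoding the
/// 'next' member then looks that stub up and uses it (marking the entry
/// IncompleteUsed), so the completed encoding is
///
///   s(S){m(next){p(s(S){})}}
///
/// and removeIncomplete() reports the type as Recursive.
///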
class TypeStringCache {
  enum Status {NonRecursive, Recursive, Incomplete, IncompleteUsed};
  struct Entry {
    std::string Str;     // The encoded TypeString for the type.
    enum Status State;   // Information about the encoding in 'Str'.
    std::string Swapped; // A temporary place holder for a Recursive encoding
                         // during the expansion of RecordType's members.
  };
  std::map<const IdentifierInfo *, struct Entry> Map;
  unsigned IncompleteCount;     // Number of Incomplete entries in the Map.
  unsigned IncompleteUsedCount; // Number of IncompleteUsed entries in the Map.
public:
  TypeStringCache() : IncompleteCount(0), IncompleteUsedCount(0) {}
  void addIncomplete(const IdentifierInfo *ID, std::string StubEnc);
  bool removeIncomplete(const IdentifierInfo *ID);
  void addIfComplete(const IdentifierInfo *ID, StringRef Str,
                     bool IsRecursive);
  StringRef lookupStr(const IdentifierInfo *ID);
};

/// TypeString encodings for enum & union fields must be ordered.
/// FieldEncoding is a helper for this ordering process.
class FieldEncoding {
  bool HasName;
  std::string Enc;
public:
  FieldEncoding(bool b, SmallStringEnc &e) : HasName(b), Enc(e.c_str()) {}
  StringRef str() { return Enc; }
  bool operator<(const FieldEncoding &rhs) const {
    if (HasName != rhs.HasName) return HasName;
    return Enc < rhs.Enc;
  }
};

class XCoreABIInfo : public DefaultABIInfo {
public:
  XCoreABIInfo(CodeGen::CodeGenTypes &CGT) : DefaultABIInfo(CGT) {}
  Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
                    QualType Ty) const override;
};

class XCoreTargetCodeGenInfo : public TargetCodeGenInfo {
  mutable TypeStringCache TSC;
public:
  XCoreTargetCodeGenInfo(CodeGenTypes &CGT)
      : TargetCodeGenInfo(new XCoreABIInfo(CGT)) {}
  void emitTargetMD(const Decl *D, llvm::GlobalValue *GV,
                    CodeGen::CodeGenModule &M) const override;
};

} // End anonymous namespace.

// TODO: this implementation is likely now redundant with the default
// EmitVAArg.
Address XCoreABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
                                QualType Ty) const {
  CGBuilderTy &Builder = CGF.Builder;

  // Get the VAList.
  CharUnits SlotSize = CharUnits::fromQuantity(4);
  Address AP(Builder.CreateLoad(VAListAddr), SlotSize);

  // Handle the argument.
  ABIArgInfo AI = classifyArgumentType(Ty);
  CharUnits TypeAlign = getContext().getTypeAlignInChars(Ty);
  llvm::Type *ArgTy = CGT.ConvertType(Ty);
  if (AI.canHaveCoerceToType() && !AI.getCoerceToType())
    AI.setCoerceToType(ArgTy);
  llvm::Type *ArgPtrTy = llvm::PointerType::getUnqual(ArgTy);

  Address Val = Address::invalid();
  CharUnits ArgSize = CharUnits::Zero();
  switch (AI.getKind()) {
  case ABIArgInfo::Expand:
  case ABIArgInfo::CoerceAndExpand:
  case ABIArgInfo::InAlloca:
    llvm_unreachable("Unsupported ABI kind for va_arg");
  case ABIArgInfo::Ignore:
    Val = Address(llvm::UndefValue::get(ArgPtrTy), TypeAlign);
    ArgSize = CharUnits::Zero();
    break;
  case ABIArgInfo::Extend:
  case ABIArgInfo::Direct:
    Val = Builder.CreateBitCast(AP, ArgPtrTy);
    ArgSize = CharUnits::fromQuantity(
        getDataLayout().getTypeAllocSize(AI.getCoerceToType()));
    ArgSize = ArgSize.alignTo(SlotSize);
    break;
  case ABIArgInfo::Indirect:
    Val = Builder.CreateElementBitCast(AP, ArgPtrTy);
    Val = Address(Builder.CreateLoad(Val), TypeAlign);
    ArgSize = SlotSize;
    break;
  }

  // Increment the VAList.
  if (!ArgSize.isZero()) {
    Address APN = Builder.CreateConstInBoundsByteGEP(AP, ArgSize);
    Builder.CreateStore(APN.getPointer(), VAListAddr);
  }

  return Val;
}

/// During the expansion of a RecordType, an incomplete TypeString is placed
/// into the cache as a means to identify and break recursion.
/// If there is a Recursive encoding in the cache, it is swapped out and will
/// be reinserted by removeIncomplete().
/// All other kinds of encoding should have been used rather than arriving here.
void TypeStringCache::addIncomplete(const IdentifierInfo *ID,
                                    std::string StubEnc) {
  if (!ID)
    return;
  Entry &E = Map[ID];
  assert((E.Str.empty() || E.State == Recursive) &&
         "Incorrect use of addIncomplete");
  assert(!StubEnc.empty() && "Passing an empty string to addIncomplete()");
  E.Swapped.swap(E.Str); // swap out the Recursive
  E.Str.swap(StubEnc);
  E.State = Incomplete;
  ++IncompleteCount;
}

/// Once the RecordType has been expanded, the temporary incomplete TypeString
/// must be removed from the cache.
/// If a Recursive was swapped out by addIncomplete(), it will be replaced.
/// Returns true if the RecordType was defined recursively.
bool TypeStringCache::removeIncomplete(const IdentifierInfo *ID) {
  if (!ID)
    return false;
  auto I = Map.find(ID);
  assert(I != Map.end() && "Entry not present");
  Entry &E = I->second;
  assert((E.State == Incomplete ||
          E.State == IncompleteUsed) &&
         "Entry must be an incomplete type");
  bool IsRecursive = false;
  if (E.State == IncompleteUsed) {
    // We made use of our Incomplete encoding, thus we are recursive.
    IsRecursive = true;
    --IncompleteUsedCount;
  }
  if (E.Swapped.empty())
    Map.erase(I);
  else {
    // Swap the Recursive back.
    E.Swapped.swap(E.Str);
    E.Swapped.clear();
    E.State = Recursive;
  }
  --IncompleteCount;
  return IsRecursive;
}

/// Add the encoded TypeString to the cache only if it is NonRecursive or
/// Recursive (viz: all sub-members were expanded as fully as possible).
void TypeStringCache::addIfComplete(const IdentifierInfo *ID, StringRef Str,
                                    bool IsRecursive) {
  if (!ID || IncompleteUsedCount)
    return; // No key or it is an incomplete sub-type so don't add.
  Entry &E = Map[ID];
  if (IsRecursive && !E.Str.empty()) {
    assert(E.State == Recursive && E.Str.size() == Str.size() &&
           "This is not the same Recursive entry");
    // The parent container was not recursive after all, so we could have used
    // this Recursive sub-member entry after all, but we assumed the worst when
    // we started, viz: IncompleteCount != 0.
    return;
  }
  assert(E.Str.empty() && "Entry already present");
  E.Str = Str.str();
  E.State = IsRecursive ? Recursive : NonRecursive;
}

/// Return a cached TypeString encoding for the ID. If there isn't one, or we
/// are recursively expanding a type (IncompleteCount != 0) and the cached
/// encoding is Recursive, return an empty StringRef.
StringRef TypeStringCache::lookupStr(const IdentifierInfo *ID) {
  if (!ID)
    return StringRef(); // We have no key.
  auto I = Map.find(ID);
  if (I == Map.end())
    return StringRef(); // We have no encoding.
  Entry &E = I->second;
  if (E.State == Recursive && IncompleteCount)
    return StringRef(); // We don't use Recursive encodings for member types.

  if (E.State == Incomplete) {
    // The incomplete type is being used to break out of recursion.
    E.State = IncompleteUsed;
    ++IncompleteUsedCount;
  }
  return E.Str;
}

/// The XCore ABI includes a type information section that communicates symbol
/// type information to the linker. The linker uses this information to verify
/// safety/correctness of things such as array bounds and pointers et al.
/// The ABI only requires C (and XC) language modules to emit TypeStrings.
/// This type information (TypeString) is emitted into meta data for all global
/// symbols: definitions, declarations, functions & variables.
///
/// The TypeString carries type, qualifier, name, size & value details.
/// Please see 'Tools Development Guide' section 2.16.2 for format details:
/// https://www.xmos.com/download/public/Tools-Development-Guide%28X9114A%29.pdf
/// The output is tested by test/CodeGen/xcore-stringtype.c.
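///
/// For example (illustrative): a C function declared as 'int f(int)' would
/// be encoded as "f{si}(si)" (see appendFunctionType() and
/// appendBuiltinType() below) and attached to the symbol via the
/// "xcore.typestrings" named metadata emitted by emitTargetMD().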
///
static bool getTypeString(SmallStringEnc &Enc, const Decl *D,
                          CodeGen::CodeGenModule &CGM, TypeStringCache &TSC);

/// XCore uses emitTargetMD to emit TypeString metadata for global symbols.
void XCoreTargetCodeGenInfo::emitTargetMD(const Decl *D, llvm::GlobalValue *GV,
                                          CodeGen::CodeGenModule &CGM) const {
  SmallStringEnc Enc;
  if (getTypeString(Enc, D, CGM, TSC)) {
    llvm::LLVMContext &Ctx = CGM.getModule().getContext();
    llvm::Metadata *MDVals[] = {llvm::ConstantAsMetadata::get(GV),
                                llvm::MDString::get(Ctx, Enc.str())};
    llvm::NamedMDNode *MD =
        CGM.getModule().getOrInsertNamedMetadata("xcore.typestrings");
    MD->addOperand(llvm::MDNode::get(Ctx, MDVals));
  }
}

//===----------------------------------------------------------------------===//
// SPIR ABI Implementation
//===----------------------------------------------------------------------===//

namespace {
class SPIRTargetCodeGenInfo : public TargetCodeGenInfo {
public:
  SPIRTargetCodeGenInfo(CodeGen::CodeGenTypes &CGT)
      : TargetCodeGenInfo(new DefaultABIInfo(CGT)) {}
  unsigned getOpenCLKernelCallingConv() const override;
};

} // End anonymous namespace.

namespace clang {
namespace CodeGen {
void computeSPIRKernelABIInfo(CodeGenModule &CGM, CGFunctionInfo &FI) {
  DefaultABIInfo SPIRABI(CGM.getTypes());
  SPIRABI.computeInfo(FI);
}
}
}

unsigned SPIRTargetCodeGenInfo::getOpenCLKernelCallingConv() const {
  return llvm::CallingConv::SPIR_KERNEL;
}

static bool appendType(SmallStringEnc &Enc, QualType QType,
                       const CodeGen::CodeGenModule &CGM,
                       TypeStringCache &TSC);

/// Helper function for appendRecordType().
/// Builds a SmallVector containing the encoded field types in declaration
/// order.
static bool extractFieldType(SmallVectorImpl<FieldEncoding> &FE,
                             const RecordDecl *RD,
                             const CodeGen::CodeGenModule &CGM,
                             TypeStringCache &TSC) {
  for (const auto *Field : RD->fields()) {
    SmallStringEnc Enc;
    Enc += "m(";
    Enc += Field->getName();
    Enc += "){";
    if (Field->isBitField()) {
      Enc += "b(";
      llvm::raw_svector_ostream OS(Enc);
      OS << Field->getBitWidthValue(CGM.getContext());
      Enc += ':';
    }
    if (!appendType(Enc, Field->getType(), CGM, TSC))
      return false;
    if (Field->isBitField())
      Enc += ')';
    Enc += '}';
    FE.emplace_back(!Field->getName().empty(), Enc);
  }
  return true;
}

/// Appends structure and union types to Enc and adds encoding to cache.
/// Recursively calls appendType (via extractFieldType) for each field.
/// Union types have their fields ordered according to the ABI.
static bool appendRecordType(SmallStringEnc &Enc, const RecordType *RT,
                             const CodeGen::CodeGenModule &CGM,
                             TypeStringCache &TSC, const IdentifierInfo *ID) {
  // Append the cached TypeString if we have one.
  StringRef TypeString = TSC.lookupStr(ID);
  if (!TypeString.empty()) {
    Enc += TypeString;
    return true;
  }

  // Start to emit an incomplete TypeString.
  size_t Start = Enc.size();
  Enc += (RT->isUnionType() ? 'u' : 's');
  Enc += '(';
  if (ID)
    Enc += ID->getName();
  Enc += "){";

  // We collect all encoded fields and order as necessary.
  bool IsRecursive = false;
  const RecordDecl *RD = RT->getDecl()->getDefinition();
  if (RD && !RD->field_empty()) {
    // An incomplete TypeString stub is placed in the cache for this RecordType
    // so that recursive calls to this RecordType will use it whilst building a
    // complete TypeString for this RecordType.
    SmallVector<FieldEncoding, 16> FE;
    std::string StubEnc(Enc.substr(Start).str());
    StubEnc += '}'; // StubEnc now holds a valid incomplete TypeString.
    TSC.addIncomplete(ID, std::move(StubEnc));
    if (!extractFieldType(FE, RD, CGM, TSC)) {
      (void) TSC.removeIncomplete(ID);
      return false;
    }
    IsRecursive = TSC.removeIncomplete(ID);
    // The ABI requires unions to be sorted but not structures.
    // See FieldEncoding::operator< for sort algorithm.
    if (RT->isUnionType())
      llvm::sort(FE);
    // We can now complete the TypeString.
    unsigned E = FE.size();
    for (unsigned I = 0; I != E; ++I) {
      if (I)
        Enc += ',';
      Enc += FE[I].str();
    }
  }
  Enc += '}';
  TSC.addIfComplete(ID, Enc.substr(Start), IsRecursive);
  return true;
}
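// Illustrative example: 'union U { int i; float f; };' produces the member
// encodings "m(f){ft}" and "m(i){si}"; because union members are sorted,
// the result is "u(U){m(f){ft},m(i){si}}" regardless of declaration order.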

/// Appends enum types to Enc and adds the encoding to the cache.
static bool appendEnumType(SmallStringEnc &Enc, const EnumType *ET,
                           TypeStringCache &TSC,
                           const IdentifierInfo *ID) {
  // Append the cached TypeString if we have one.
  StringRef TypeString = TSC.lookupStr(ID);
  if (!TypeString.empty()) {
    Enc += TypeString;
    return true;
  }

  size_t Start = Enc.size();
  Enc += "e(";
  if (ID)
    Enc += ID->getName();
  Enc += "){";

  // We collect all encoded enumerations and order them alphanumerically.
  if (const EnumDecl *ED = ET->getDecl()->getDefinition()) {
    SmallVector<FieldEncoding, 16> FE;
    for (auto I = ED->enumerator_begin(), E = ED->enumerator_end(); I != E;
         ++I) {
      SmallStringEnc EnumEnc;
      EnumEnc += "m(";
      EnumEnc += I->getName();
      EnumEnc += "){";
      I->getInitVal().toString(EnumEnc);
      EnumEnc += '}';
      FE.push_back(FieldEncoding(!I->getName().empty(), EnumEnc));
    }
    llvm::sort(FE);
    unsigned E = FE.size();
    for (unsigned I = 0; I != E; ++I) {
      if (I)
        Enc += ',';
      Enc += FE[I].str();
    }
  }
  Enc += '}';
  TSC.addIfComplete(ID, Enc.substr(Start), false);
  return true;
}
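// Illustrative example: 'enum E { A = 1, B = 10 };' encodes as
// "e(E){m(A){1},m(B){10}}"; enumerators are likewise ordered by their
// member encoding rather than by declaration order.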

/// Appends type's qualifier to Enc.
/// This is done prior to appending the type's encoding.
static void appendQualifier(SmallStringEnc &Enc, QualType QT) {
  // Qualifiers are emitted in alphabetical order.
  static const char *const Table[]={"","c:","r:","cr:","v:","cv:","rv:","crv:"};
  int Lookup = 0;
  if (QT.isConstQualified())
    Lookup += 1<<0;
  if (QT.isRestrictQualified())
    Lookup += 1<<1;
  if (QT.isVolatileQualified())
    Lookup += 1<<2;
  Enc += Table[Lookup];
}
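// Illustrative example: for 'const volatile int' the lookup value is
// (1 << 0) + (1 << 2) == 5, selecting "cv:" from the table, so the type
// encodes as "cv:si".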

/// Appends built-in types to Enc.
static bool appendBuiltinType(SmallStringEnc &Enc, const BuiltinType *BT) {
  const char *EncType;
  switch (BT->getKind()) {
    case BuiltinType::Void:
      EncType = "0";
      break;
    case BuiltinType::Bool:
      EncType = "b";
      break;
    case BuiltinType::Char_U:
      EncType = "uc";
      break;
    case BuiltinType::UChar:
      EncType = "uc";
      break;
    case BuiltinType::SChar:
      EncType = "sc";
      break;
    case BuiltinType::UShort:
      EncType = "us";
      break;
    case BuiltinType::Short:
      EncType = "ss";
      break;
    case BuiltinType::UInt:
      EncType = "ui";
      break;
    case BuiltinType::Int:
      EncType = "si";
      break;
    case BuiltinType::ULong:
      EncType = "ul";
      break;
    case BuiltinType::Long:
      EncType = "sl";
      break;
    case BuiltinType::ULongLong:
      EncType = "ull";
      break;
    case BuiltinType::LongLong:
      EncType = "sll";
      break;
    case BuiltinType::Float:
      EncType = "ft";
      break;
    case BuiltinType::Double:
      EncType = "d";
      break;
    case BuiltinType::LongDouble:
      EncType = "ld";
      break;
    default:
      return false;
  }
  Enc += EncType;
  return true;
}

/// Appends a pointer encoding to Enc before calling appendType for the pointee.
static bool appendPointerType(SmallStringEnc &Enc, const PointerType *PT,
                              const CodeGen::CodeGenModule &CGM,
                              TypeStringCache &TSC) {
  Enc += "p(";
  if (!appendType(Enc, PT->getPointeeType(), CGM, TSC))
    return false;
  Enc += ')';
  return true;
}

/// Appends array encoding to Enc before calling appendType for the element.
static bool appendArrayType(SmallStringEnc &Enc, QualType QT,
                            const ArrayType *AT,
                            const CodeGen::CodeGenModule &CGM,
                            TypeStringCache &TSC, StringRef NoSizeEnc) {
  if (AT->getSizeModifier() != ArrayType::Normal)
    return false;
  Enc += "a(";
  if (const ConstantArrayType *CAT = dyn_cast<ConstantArrayType>(AT))
    CAT->getSize().toStringUnsigned(Enc);
  else
    Enc += NoSizeEnc; // Global arrays use "*", otherwise it is "".
  Enc += ':';
  // The Qualifiers should be attached to the type rather than the array.
  appendQualifier(Enc, QT);
  if (!appendType(Enc, AT->getElementType(), CGM, TSC))
    return false;
  Enc += ')';
  return true;
}
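// Illustrative examples: 'int a[10]' encodes as "a(10:si)", while a global
// array of unknown size is encoded with NoSizeEnc == "*" as "a(*:si)" (see
// getTypeString() below).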

/// Appends a function encoding to Enc, calling appendType for the return type
/// and the arguments.
static bool appendFunctionType(SmallStringEnc &Enc, const FunctionType *FT,
                               const CodeGen::CodeGenModule &CGM,
                               TypeStringCache &TSC) {
  Enc += "f{";
  if (!appendType(Enc, FT->getReturnType(), CGM, TSC))
    return false;
  Enc += "}(";
  if (const FunctionProtoType *FPT = FT->getAs<FunctionProtoType>()) {
    // N.B. we are only interested in the adjusted param types.
    auto I = FPT->param_type_begin();
    auto E = FPT->param_type_end();
    if (I != E) {
      do {
        if (!appendType(Enc, *I, CGM, TSC))
          return false;
        ++I;
        if (I != E)
          Enc += ',';
      } while (I != E);
      if (FPT->isVariadic())
        Enc += ",va";
    } else {
      if (FPT->isVariadic())
        Enc += "va";
      else
        Enc += '0';
    }
  }
  Enc += ')';
  return true;
}
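// Illustrative examples: 'int f(signed char, ...)' encodes as
// "f{si}(sc,va)"; a prototyped function taking no arguments, such as
// 'int g(void)', encodes as "f{si}(0)".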

/// Handles the type's qualifier before dispatching a call to handle specific
/// type encodings.
static bool appendType(SmallStringEnc &Enc, QualType QType,
                       const CodeGen::CodeGenModule &CGM,
                       TypeStringCache &TSC) {

  QualType QT = QType.getCanonicalType();

  if (const ArrayType *AT = QT->getAsArrayTypeUnsafe())
    // The Qualifiers should be attached to the type rather than the array.
    // Thus we don't call appendQualifier() here.
    return appendArrayType(Enc, QT, AT, CGM, TSC, "");

  appendQualifier(Enc, QT);

  if (const BuiltinType *BT = QT->getAs<BuiltinType>())
    return appendBuiltinType(Enc, BT);

  if (const PointerType *PT = QT->getAs<PointerType>())
    return appendPointerType(Enc, PT, CGM, TSC);

  if (const EnumType *ET = QT->getAs<EnumType>())
    return appendEnumType(Enc, ET, TSC, QT.getBaseTypeIdentifier());

  if (const RecordType *RT = QT->getAsStructureType())
    return appendRecordType(Enc, RT, CGM, TSC, QT.getBaseTypeIdentifier());

  if (const RecordType *RT = QT->getAsUnionType())
    return appendRecordType(Enc, RT, CGM, TSC, QT.getBaseTypeIdentifier());

  if (const FunctionType *FT = QT->getAs<FunctionType>())
    return appendFunctionType(Enc, FT, CGM, TSC);

  return false;
}

static bool getTypeString(SmallStringEnc &Enc, const Decl *D,
                          CodeGen::CodeGenModule &CGM, TypeStringCache &TSC) {
  if (!D)
    return false;

  if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(D)) {
    if (FD->getLanguageLinkage() != CLanguageLinkage)
      return false;
    return appendType(Enc, FD->getType(), CGM, TSC);
  }

  if (const VarDecl *VD = dyn_cast<VarDecl>(D)) {
    if (VD->getLanguageLinkage() != CLanguageLinkage)
      return false;
    QualType QT = VD->getType().getCanonicalType();
    if (const ArrayType *AT = QT->getAsArrayTypeUnsafe()) {
      // Global ArrayTypes are given a size of '*' if the size is unknown.
      // The Qualifiers should be attached to the type rather than the array.
      // Thus we don't call appendQualifier() here.
      return appendArrayType(Enc, QT, AT, CGM, TSC, "*");
    }
    return appendType(Enc, QT, CGM, TSC);
  }
  return false;
}

//===----------------------------------------------------------------------===//
// RISCV ABI Implementation
//===----------------------------------------------------------------------===//

namespace {
class RISCVABIInfo : public DefaultABIInfo {
private:
  // Size of the integer ('x') registers in bits.
  unsigned XLen;
  // Size of the floating point ('f') registers in bits. Note that the target
  // ISA might have a wider FLen than the selected ABI (e.g. an RV32IF target
  // with soft float ABI has FLen==0).
  unsigned FLen;
  static const int NumArgGPRs = 8;
  static const int NumArgFPRs = 8;
  bool detectFPCCEligibleStructHelper(QualType Ty, CharUnits CurOff,
                                      llvm::Type *&Field1Ty,
                                      CharUnits &Field1Off,
                                      llvm::Type *&Field2Ty,
                                      CharUnits &Field2Off) const;

public:
  RISCVABIInfo(CodeGen::CodeGenTypes &CGT, unsigned XLen, unsigned FLen)
      : DefaultABIInfo(CGT), XLen(XLen), FLen(FLen) {}

  // DefaultABIInfo's classifyReturnType and classifyArgumentType are
  // non-virtual, but computeInfo is virtual, so we override it.
  void computeInfo(CGFunctionInfo &FI) const override;

  ABIArgInfo classifyArgumentType(QualType Ty, bool IsFixed, int &ArgGPRsLeft,
                                  int &ArgFPRsLeft) const;
  ABIArgInfo classifyReturnType(QualType RetTy) const;

  Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
                    QualType Ty) const override;

  ABIArgInfo extendType(QualType Ty) const;

  bool detectFPCCEligibleStruct(QualType Ty, llvm::Type *&Field1Ty,
                                CharUnits &Field1Off, llvm::Type *&Field2Ty,
                                CharUnits &Field2Off, int &NeededArgGPRs,
                                int &NeededArgFPRs) const;
  ABIArgInfo coerceAndExpandFPCCEligibleStruct(llvm::Type *Field1Ty,
                                               CharUnits Field1Off,
                                               llvm::Type *Field2Ty,
                                               CharUnits Field2Off) const;
};
} // end anonymous namespace

void RISCVABIInfo::computeInfo(CGFunctionInfo &FI) const {
  QualType RetTy = FI.getReturnType();
  if (!getCXXABI().classifyReturnType(FI))
    FI.getReturnInfo() = classifyReturnType(RetTy);

  // IsRetIndirect is true if classifyArgumentType indicated the value should
  // be passed indirect, or if the type size is a scalar greater than 2*XLen
  // and not a complex type with elements <= FLen. e.g. fp128 is passed direct
  // in LLVM IR, relying on the backend lowering code to rewrite the argument
  // list and pass indirectly on RV32.
  bool IsRetIndirect = FI.getReturnInfo().getKind() == ABIArgInfo::Indirect;
  if (!IsRetIndirect && RetTy->isScalarType() &&
      getContext().getTypeSize(RetTy) > (2 * XLen)) {
    if (RetTy->isComplexType() && FLen) {
      QualType EltTy = RetTy->getAs<ComplexType>()->getElementType();
      IsRetIndirect = getContext().getTypeSize(EltTy) > FLen;
    } else {
      // This is a normal scalar > 2*XLen, such as fp128 on RV32.
      IsRetIndirect = true;
    }
  }

  // We must track the number of GPRs used in order to conform to the RISC-V
  // ABI, as integer scalars passed in registers should have signext/zeroext
  // when promoted, but are anyext if passed on the stack. As GPR usage is
  // different for variadic arguments, we must also track whether we are
  // examining a vararg or not.
  int ArgGPRsLeft = IsRetIndirect ? NumArgGPRs - 1 : NumArgGPRs;
  int ArgFPRsLeft = FLen ? NumArgFPRs : 0;
  int NumFixedArgs = FI.getNumRequiredArgs();

  int ArgNum = 0;
  for (auto &ArgInfo : FI.arguments()) {
    bool IsFixed = ArgNum < NumFixedArgs;
    ArgInfo.info =
        classifyArgumentType(ArgInfo.type, IsFixed, ArgGPRsLeft, ArgFPRsLeft);
    ArgNum++;
  }
}

// Returns true if the struct is a potential candidate for the floating point
// calling convention. If this function returns true, the caller is
// responsible for checking that if there is only a single field then that
// field is a float.
bool RISCVABIInfo::detectFPCCEligibleStructHelper(QualType Ty, CharUnits CurOff,
                                                  llvm::Type *&Field1Ty,
                                                  CharUnits &Field1Off,
                                                  llvm::Type *&Field2Ty,
                                                  CharUnits &Field2Off) const {
  bool IsInt = Ty->isIntegralOrEnumerationType();
  bool IsFloat = Ty->isRealFloatingType();

  if (IsInt || IsFloat) {
    uint64_t Size = getContext().getTypeSize(Ty);
    if (IsInt && Size > XLen)
      return false;
    // Can't be eligible if larger than the FP registers. Half precision isn't
    // currently supported on RISC-V and the ABI hasn't been confirmed, so
    // default to the integer ABI in that case.
    if (IsFloat && (Size > FLen || Size < 32))
      return false;
    // Can't be eligible if an integer type was already found (int+int pairs
    // are not eligible).
    if (IsInt && Field1Ty && Field1Ty->isIntegerTy())
      return false;
    if (!Field1Ty) {
      Field1Ty = CGT.ConvertType(Ty);
      Field1Off = CurOff;
      return true;
    }
    if (!Field2Ty) {
      Field2Ty = CGT.ConvertType(Ty);
      Field2Off = CurOff;
      return true;
    }
    return false;
  }

  if (auto CTy = Ty->getAs<ComplexType>()) {
    if (Field1Ty)
      return false;
    QualType EltTy = CTy->getElementType();
    if (getContext().getTypeSize(EltTy) > FLen)
      return false;
    Field1Ty = CGT.ConvertType(EltTy);
    Field1Off = CurOff;
    assert(CurOff.isZero() && "Unexpected offset for first field");
    Field2Ty = Field1Ty;
    Field2Off = Field1Off + getContext().getTypeSizeInChars(EltTy);
    return true;
  }

  if (const ConstantArrayType *ATy = getContext().getAsConstantArrayType(Ty)) {
    uint64_t ArraySize = ATy->getSize().getZExtValue();
    QualType EltTy = ATy->getElementType();
    CharUnits EltSize = getContext().getTypeSizeInChars(EltTy);
    for (uint64_t i = 0; i < ArraySize; ++i) {
      bool Ret = detectFPCCEligibleStructHelper(EltTy, CurOff, Field1Ty,
                                                Field1Off, Field2Ty, Field2Off);
      if (!Ret)
        return false;
      CurOff += EltSize;
    }
    return true;
  }

  if (const auto *RTy = Ty->getAs<RecordType>()) {
    // Structures with either a non-trivial destructor or a non-trivial
    // copy constructor are not eligible for the FP calling convention.
    if (getRecordArgABI(Ty, CGT.getCXXABI()))
      return false;
    if (isEmptyRecord(getContext(), Ty, true))
      return true;
    const RecordDecl *RD = RTy->getDecl();
    // Unions aren't eligible unless they're empty (which is caught above).
    if (RD->isUnion())
      return false;
    int ZeroWidthBitFieldCount = 0;
    for (const FieldDecl *FD : RD->fields()) {
      const ASTRecordLayout &Layout = getContext().getASTRecordLayout(RD);
      uint64_t FieldOffInBits = Layout.getFieldOffset(FD->getFieldIndex());
      QualType QTy = FD->getType();
      if (FD->isBitField()) {
        unsigned BitWidth = FD->getBitWidthValue(getContext());
        // Allow a bitfield with a type greater than XLen as long as the
        // bitwidth is XLen or less.
        if (getContext().getTypeSize(QTy) > XLen && BitWidth <= XLen)
          QTy = getContext().getIntTypeForBitwidth(XLen, false);
        if (BitWidth == 0) {
          ZeroWidthBitFieldCount++;
          continue;
        }
      }

      bool Ret = detectFPCCEligibleStructHelper(
          QTy, CurOff + getContext().toCharUnitsFromBits(FieldOffInBits),
          Field1Ty, Field1Off, Field2Ty, Field2Off);
      if (!Ret)
        return false;

      // As a quirk of the ABI, zero-width bitfields aren't ignored for fp+fp
      // or int+fp structs, but are ignored for a struct with an fp field and
      // any number of zero-width bitfields.
      if (Field2Ty && ZeroWidthBitFieldCount > 0)
        return false;
    }
    return Field1Ty != nullptr;
  }

  return false;
}

// Determine if a struct is eligible for passing according to the floating
// point calling convention (i.e., when flattened it contains a single fp
// value, fp+fp, or int+fp of appropriate size). If so, NeededArgFPRs and
// NeededArgGPRs are incremented appropriately.
bool RISCVABIInfo::detectFPCCEligibleStruct(QualType Ty, llvm::Type *&Field1Ty,
                                            CharUnits &Field1Off,
                                            llvm::Type *&Field2Ty,
                                            CharUnits &Field2Off,
                                            int &NeededArgGPRs,
                                            int &NeededArgFPRs) const {
  Field1Ty = nullptr;
  Field2Ty = nullptr;
  NeededArgGPRs = 0;
  NeededArgFPRs = 0;
  bool IsCandidate = detectFPCCEligibleStructHelper(
      Ty, CharUnits::Zero(), Field1Ty, Field1Off, Field2Ty, Field2Off);
  // Not really a candidate if we have a single int but no float.
  if (Field1Ty && !Field2Ty && !Field1Ty->isFloatingPointTy())
    return false;
  if (!IsCandidate)
    return false;
  if (Field1Ty && Field1Ty->isFloatingPointTy())
    NeededArgFPRs++;
  else if (Field1Ty)
    NeededArgGPRs++;
  if (Field2Ty && Field2Ty->isFloatingPointTy())
    NeededArgFPRs++;
  else if (Field2Ty)
    NeededArgGPRs++;
  return IsCandidate;
}
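// Illustrative example (assuming a hard-double ABI, FLen == 64): for
// 'struct { double d; int i; };' the helper records Field1Ty == double and
// Field2Ty == i32, so the struct is a candidate with NeededArgFPRs == 1 and
// NeededArgGPRs == 1; a struct flattening to two integers is rejected by
// the int+int check in the helper above.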

// Call getCoerceAndExpand for the two-element flattened struct described by
// Field1Ty, Field1Off, Field2Ty, Field2Off. This method will create an
// appropriate coerceToType and unpaddedCoerceToType.
ABIArgInfo RISCVABIInfo::coerceAndExpandFPCCEligibleStruct(
    llvm::Type *Field1Ty, CharUnits Field1Off, llvm::Type *Field2Ty,
    CharUnits Field2Off) const {
  SmallVector<llvm::Type *, 3> CoerceElts;
  SmallVector<llvm::Type *, 2> UnpaddedCoerceElts;
  if (!Field1Off.isZero())
    CoerceElts.push_back(llvm::ArrayType::get(
        llvm::Type::getInt8Ty(getVMContext()), Field1Off.getQuantity()));

  CoerceElts.push_back(Field1Ty);
  UnpaddedCoerceElts.push_back(Field1Ty);

  if (!Field2Ty) {
    return ABIArgInfo::getCoerceAndExpand(
        llvm::StructType::get(getVMContext(), CoerceElts, !Field1Off.isZero()),
        UnpaddedCoerceElts[0]);
  }

  CharUnits Field2Align =
      CharUnits::fromQuantity(getDataLayout().getABITypeAlignment(Field2Ty));
  CharUnits Field1Size =
      CharUnits::fromQuantity(getDataLayout().getTypeStoreSize(Field1Ty));
  CharUnits Field2OffNoPadNoPack = Field1Size.alignTo(Field2Align);

  CharUnits Padding = CharUnits::Zero();
  if (Field2Off > Field2OffNoPadNoPack)
    Padding = Field2Off - Field2OffNoPadNoPack;
  else if (Field2Off != Field2Align && Field2Off > Field1Size)
    Padding = Field2Off - Field1Size;

  bool IsPacked = !Field2Off.isMultipleOf(Field2Align);

  if (!Padding.isZero())
    CoerceElts.push_back(llvm::ArrayType::get(
        llvm::Type::getInt8Ty(getVMContext()), Padding.getQuantity()));

  CoerceElts.push_back(Field2Ty);
  UnpaddedCoerceElts.push_back(Field2Ty);

  auto CoerceToType =
      llvm::StructType::get(getVMContext(), CoerceElts, IsPacked);
  auto UnpaddedCoerceToType =
      llvm::StructType::get(getVMContext(), UnpaddedCoerceElts, IsPacked);

  return ABIArgInfo::getCoerceAndExpand(CoerceToType, UnpaddedCoerceToType);
}
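// Illustrative example: for 'struct { double d; int i; };' on RV64,
// Field2Off (8 bytes) already equals the no-padding offset of the i32, so
// no padding element is inserted, IsPacked is false, and the coerced type
// is simply { double, i32 }.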

ABIArgInfo RISCVABIInfo::classifyArgumentType(QualType Ty, bool IsFixed,
                                              int &ArgGPRsLeft,
                                              int &ArgFPRsLeft) const {
  assert(ArgGPRsLeft <= NumArgGPRs && "Arg GPR tracking underflow");
  Ty = useFirstFieldIfTransparentUnion(Ty);

  // Structures with either a non-trivial destructor or a non-trivial
  // copy constructor are always passed indirectly.
  if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, getCXXABI())) {
    if (ArgGPRsLeft)
      ArgGPRsLeft -= 1;
    return getNaturalAlignIndirect(Ty, /*ByVal=*/RAA ==
                                           CGCXXABI::RAA_DirectInMemory);
  }

  // Ignore empty structs/unions.
  if (isEmptyRecord(getContext(), Ty, true))
    return ABIArgInfo::getIgnore();

  uint64_t Size = getContext().getTypeSize(Ty);

  // Pass floating point values via FPRs if possible.
  if (IsFixed && Ty->isFloatingType() && !Ty->isComplexType() &&
      FLen >= Size && ArgFPRsLeft) {
    ArgFPRsLeft--;
    return ABIArgInfo::getDirect();
  }

  // Complex types for the hard float ABI must be passed direct rather than
  // using CoerceAndExpand.
  if (IsFixed && Ty->isComplexType() && FLen && ArgFPRsLeft >= 2) {
    QualType EltTy = Ty->castAs<ComplexType>()->getElementType();
    if (getContext().getTypeSize(EltTy) <= FLen) {
      ArgFPRsLeft -= 2;
      return ABIArgInfo::getDirect();
    }
  }

  if (IsFixed && FLen && Ty->isStructureOrClassType()) {
    llvm::Type *Field1Ty = nullptr;
    llvm::Type *Field2Ty = nullptr;
    CharUnits Field1Off = CharUnits::Zero();
    CharUnits Field2Off = CharUnits::Zero();
    int NeededArgGPRs;
    int NeededArgFPRs;
    bool IsCandidate =
        detectFPCCEligibleStruct(Ty, Field1Ty, Field1Off, Field2Ty, Field2Off,
                                 NeededArgGPRs, NeededArgFPRs);
    if (IsCandidate && NeededArgGPRs <= ArgGPRsLeft &&
        NeededArgFPRs <= ArgFPRsLeft) {
      ArgGPRsLeft -= NeededArgGPRs;
      ArgFPRsLeft -= NeededArgFPRs;
      return coerceAndExpandFPCCEligibleStruct(Field1Ty, Field1Off, Field2Ty,
                                               Field2Off);
    }
  }

  uint64_t NeededAlign = getContext().getTypeAlign(Ty);
  bool MustUseStack = false;
  // Determine the number of GPRs needed to pass the current argument
  // according to the ABI. 2*XLen-aligned varargs are passed in "aligned"
  // register pairs, so may consume 3 registers.
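  // For example (illustrative): an 'int64_t' vararg on RV32 with an odd
  // number of GPRs remaining needs 2 + 1 == 3 registers, since one register
  // is skipped to reach an even-numbered, aligned register pair.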
  int NeededArgGPRs = 1;
  if (!IsFixed && NeededAlign == 2 * XLen)
    NeededArgGPRs = 2 + (ArgGPRsLeft % 2);
  else if (Size > XLen && Size <= 2 * XLen)
    NeededArgGPRs = 2;

  if (NeededArgGPRs > ArgGPRsLeft) {
    MustUseStack = true;
    NeededArgGPRs = ArgGPRsLeft;
  }

  ArgGPRsLeft -= NeededArgGPRs;

  if (!isAggregateTypeForABI(Ty) && !Ty->isVectorType()) {
    // Treat an enum type as its underlying type.
    if (const EnumType *EnumTy = Ty->getAs<EnumType>())
      Ty = EnumTy->getDecl()->getIntegerType();

    // All integral types are promoted to XLen width, unless passed on the
    // stack.
    if (Size < XLen && Ty->isIntegralOrEnumerationType() && !MustUseStack) {
      return extendType(Ty);
    }

    return ABIArgInfo::getDirect();
  }

  // Aggregates which are <= 2*XLen will be passed in registers if possible,
  // so coerce to integers.
  if (Size <= 2 * XLen) {
    unsigned Alignment = getContext().getTypeAlign(Ty);

    // Use a single XLen int if possible, 2*XLen if 2*XLen alignment is
    // required, and a 2-element XLen array if only XLen alignment is required.
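    // For example (illustrative, RV32): a 4-byte struct becomes i32, an
    // 8-byte struct with 8-byte alignment becomes i64, and an 8-byte struct
    // with only 4-byte alignment becomes [2 x i32].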
    if (Size <= XLen) {
      return ABIArgInfo::getDirect(
          llvm::IntegerType::get(getVMContext(), XLen));
    } else if (Alignment == 2 * XLen) {
      return ABIArgInfo::getDirect(
          llvm::IntegerType::get(getVMContext(), 2 * XLen));
    } else {
      return ABIArgInfo::getDirect(llvm::ArrayType::get(
          llvm::IntegerType::get(getVMContext(), XLen), 2));
    }
  }
  return getNaturalAlignIndirect(Ty, /*ByVal=*/false);
}

ABIArgInfo RISCVABIInfo::classifyReturnType(QualType RetTy) const {
  if (RetTy->isVoidType())
    return ABIArgInfo::getIgnore();

  int ArgGPRsLeft = 2;
  int ArgFPRsLeft = FLen ? 2 : 0;

  // The rules for return and argument types are the same, so defer to
  // classifyArgumentType.
  return classifyArgumentType(RetTy, /*IsFixed=*/true, ArgGPRsLeft,
                              ArgFPRsLeft);
}

Address RISCVABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
                                QualType Ty) const {
  CharUnits SlotSize = CharUnits::fromQuantity(XLen / 8);

  // Empty records are ignored for parameter passing purposes.
  if (isEmptyRecord(getContext(), Ty, true)) {
    Address Addr(CGF.Builder.CreateLoad(VAListAddr), SlotSize);
    Addr = CGF.Builder.CreateElementBitCast(Addr, CGF.ConvertTypeForMem(Ty));
    return Addr;
  }

  std::pair<CharUnits, CharUnits> SizeAndAlign =
      getContext().getTypeInfoInChars(Ty);

  // Arguments bigger than 2*XLen bytes are passed indirectly.
  bool IsIndirect = SizeAndAlign.first > 2 * SlotSize;

  return emitVoidPtrVAArg(CGF, VAListAddr, Ty, IsIndirect, SizeAndAlign,
                          SlotSize, /*AllowHigherAlign=*/true);
}

ABIArgInfo RISCVABIInfo::extendType(QualType Ty) const {
  int TySize = getContext().getTypeSize(Ty);
  // RV64 ABI requires unsigned 32 bit integers to be sign extended.
  if (XLen == 64 && Ty->isUnsignedIntegerOrEnumerationType() && TySize == 32)
    return ABIArgInfo::getSignExtend(Ty);
  return ABIArgInfo::getExtend(Ty);
}

namespace {
class RISCVTargetCodeGenInfo : public TargetCodeGenInfo {
public:
  RISCVTargetCodeGenInfo(CodeGen::CodeGenTypes &CGT, unsigned XLen,
                         unsigned FLen)
      : TargetCodeGenInfo(new RISCVABIInfo(CGT, XLen, FLen)) {}

  void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
                           CodeGen::CodeGenModule &CGM) const override {
    const auto *FD = dyn_cast_or_null<FunctionDecl>(D);
    if (!FD) return;

    const auto *Attr = FD->getAttr<RISCVInterruptAttr>();
    if (!Attr)
      return;

    const char *Kind;
    switch (Attr->getInterrupt()) {
    case RISCVInterruptAttr::user: Kind = "user"; break;
    case RISCVInterruptAttr::supervisor: Kind = "supervisor"; break;
    case RISCVInterruptAttr::machine: Kind = "machine"; break;
    }

    auto *Fn = cast<llvm::Function>(GV);

    Fn->addFnAttr("interrupt", Kind);
  }
};
} // namespace

//===----------------------------------------------------------------------===//
// Driver code
//===----------------------------------------------------------------------===//

bool CodeGenModule::supportsCOMDAT() const {
  return getTriple().supportsCOMDAT();
}

const TargetCodeGenInfo &CodeGenModule::getTargetCodeGenInfo() {
  if (TheTargetCodeGenInfo)
    return *TheTargetCodeGenInfo;

  // Helper to set the unique_ptr while still keeping the return value.
  auto SetCGInfo = [&](TargetCodeGenInfo *P) -> const TargetCodeGenInfo & {
    this->TheTargetCodeGenInfo.reset(P);
    return *P;
  };

  const llvm::Triple &Triple = getTarget().getTriple();
  switch (Triple.getArch()) {
  default:
    return SetCGInfo(new DefaultTargetCodeGenInfo(Types));

  case llvm::Triple::le32:
    return SetCGInfo(new PNaClTargetCodeGenInfo(Types));
  case llvm::Triple::mips:
  case llvm::Triple::mipsel:
    if (Triple.getOS() == llvm::Triple::NaCl)
      return SetCGInfo(new PNaClTargetCodeGenInfo(Types));
    return SetCGInfo(new MIPSTargetCodeGenInfo(Types, true));

  case llvm::Triple::mips64:
  case llvm::Triple::mips64el:
    return SetCGInfo(new MIPSTargetCodeGenInfo(Types, false));

  case llvm::Triple::avr:
    return SetCGInfo(new AVRTargetCodeGenInfo(Types));

  case llvm::Triple::aarch64:
  case llvm::Triple::aarch64_32:
  case llvm::Triple::aarch64_be: {
    AArch64ABIInfo::ABIKind Kind = AArch64ABIInfo::AAPCS;
    if (getTarget().getABI() == "darwinpcs")
      Kind = AArch64ABIInfo::DarwinPCS;
    else if (Triple.isOSWindows())
      return SetCGInfo(
          new WindowsAArch64TargetCodeGenInfo(Types, AArch64ABIInfo::Win64));

    return SetCGInfo(new AArch64TargetCodeGenInfo(Types, Kind));
  }

  case llvm::Triple::wasm32:
  case llvm::Triple::wasm64:
    return SetCGInfo(new WebAssemblyTargetCodeGenInfo(Types));

  case llvm::Triple::arm:
  case llvm::Triple::armeb:
  case llvm::Triple::thumb:
  case llvm::Triple::thumbeb: {
    if (Triple.getOS() == llvm::Triple::Win32) {
      return SetCGInfo(
          new WindowsARMTargetCodeGenInfo(Types, ARMABIInfo::AAPCS_VFP));
    }

    ARMABIInfo::ABIKind Kind = ARMABIInfo::AAPCS;
    StringRef ABIStr = getTarget().getABI();
    if (ABIStr == "apcs-gnu")
      Kind = ARMABIInfo::APCS;
    else if (ABIStr == "aapcs16")
      Kind = ARMABIInfo::AAPCS16_VFP;
    else if (CodeGenOpts.FloatABI == "hard" ||
             (CodeGenOpts.FloatABI != "soft" &&
              (Triple.getEnvironment() == llvm::Triple::GNUEABIHF ||
               Triple.getEnvironment() == llvm::Triple::MuslEABIHF ||
               Triple.getEnvironment() == llvm::Triple::EABIHF)))
      Kind = ARMABIInfo::AAPCS_VFP;

    return SetCGInfo(new ARMTargetCodeGenInfo(Types, Kind));
  }

  case llvm::Triple::ppc:
    return SetCGInfo(
        new PPC32TargetCodeGenInfo(Types, CodeGenOpts.FloatABI == "soft" ||
                                              getTarget().hasFeature("spe")));
  case llvm::Triple::ppc64:
    if (Triple.isOSBinFormatELF()) {
      PPC64_SVR4_ABIInfo::ABIKind Kind = PPC64_SVR4_ABIInfo::ELFv1;
      if (getTarget().getABI() == "elfv2")
        Kind = PPC64_SVR4_ABIInfo::ELFv2;
      bool HasQPX = getTarget().getABI() == "elfv1-qpx";
      bool IsSoftFloat = CodeGenOpts.FloatABI == "soft";

      return SetCGInfo(new PPC64_SVR4_TargetCodeGenInfo(Types, Kind, HasQPX,
                                                        IsSoftFloat));
    } else
      return SetCGInfo(new PPC64TargetCodeGenInfo(Types));
  case llvm::Triple::ppc64le: {
    assert(Triple.isOSBinFormatELF() && "PPC64 LE non-ELF not supported!");
    PPC64_SVR4_ABIInfo::ABIKind Kind = PPC64_SVR4_ABIInfo::ELFv2;
    if (getTarget().getABI() == "elfv1" || getTarget().getABI() == "elfv1-qpx")
      Kind = PPC64_SVR4_ABIInfo::ELFv1;
    bool HasQPX = getTarget().getABI() == "elfv1-qpx";
    bool IsSoftFloat = CodeGenOpts.FloatABI == "soft";

    return SetCGInfo(new PPC64_SVR4_TargetCodeGenInfo(Types, Kind, HasQPX,
                                                      IsSoftFloat));
  }

  case llvm::Triple::nvptx:
  case llvm::Triple::nvptx64:
    return SetCGInfo(new NVPTXTargetCodeGenInfo(Types));

  case llvm::Triple::msp430:
    return SetCGInfo(new MSP430TargetCodeGenInfo(Types));

  case llvm::Triple::riscv32:
  case llvm::Triple::riscv64: {
    StringRef ABIStr = getTarget().getABI();
    unsigned XLen = getTarget().getPointerWidth(0);
    unsigned ABIFLen = 0;
    if (ABIStr.endswith("f"))
      ABIFLen = 32;
    else if (ABIStr.endswith("d"))
      ABIFLen = 64;
    return SetCGInfo(new RISCVTargetCodeGenInfo(Types, XLen, ABIFLen));
  }

  case llvm::Triple::systemz: {
    bool HasVector = getTarget().getABI() == "vector";
    return SetCGInfo(new SystemZTargetCodeGenInfo(Types, HasVector));
  }

  case llvm::Triple::tce:
  case llvm::Triple::tcele:
    return SetCGInfo(new TCETargetCodeGenInfo(Types));

  case llvm::Triple::x86: {
    bool IsDarwinVectorABI = Triple.isOSDarwin();
    bool RetSmallStructInRegABI =
        X86_32TargetCodeGenInfo::isStructReturnInRegABI(Triple, CodeGenOpts);
    bool IsWin32FloatStructABI = Triple.isOSWindows() && !Triple.isOSCygMing();

    if (Triple.getOS() == llvm::Triple::Win32) {
      return SetCGInfo(new WinX86_32TargetCodeGenInfo(
          Types, IsDarwinVectorABI, RetSmallStructInRegABI,
          IsWin32FloatStructABI, CodeGenOpts.NumRegisterParameters));
    } else {
      return SetCGInfo(new X86_32TargetCodeGenInfo(
          Types, IsDarwinVectorABI, RetSmallStructInRegABI,
          IsWin32FloatStructABI, CodeGenOpts.NumRegisterParameters,
          CodeGenOpts.FloatABI == "soft"));
    }
  }

  case llvm::Triple::x86_64: {
    StringRef ABI = getTarget().getABI();
    X86AVXABILevel AVXLevel =
        (ABI == "avx512"
             ? X86AVXABILevel::AVX512
             : ABI == "avx" ? X86AVXABILevel::AVX : X86AVXABILevel::None);

    switch (Triple.getOS()) {
    case llvm::Triple::Win32:
      return SetCGInfo(new WinX86_64TargetCodeGenInfo(Types, AVXLevel));
    default:
      return SetCGInfo(new X86_64TargetCodeGenInfo(Types, AVXLevel));
    }
  }
  case llvm::Triple::hexagon:
    return SetCGInfo(new HexagonTargetCodeGenInfo(Types));
  case llvm::Triple::lanai:
    return SetCGInfo(new LanaiTargetCodeGenInfo(Types));
  case llvm::Triple::r600:
    return SetCGInfo(new AMDGPUTargetCodeGenInfo(Types));
  case llvm::Triple::amdgcn:
    return SetCGInfo(new AMDGPUTargetCodeGenInfo(Types));
  case llvm::Triple::sparc:
    return SetCGInfo(new SparcV8TargetCodeGenInfo(Types));
  case llvm::Triple::sparcv9:
    return SetCGInfo(new SparcV9TargetCodeGenInfo(Types));
  case llvm::Triple::xcore:
    return SetCGInfo(new XCoreTargetCodeGenInfo(Types));
  case llvm::Triple::arc:
    return SetCGInfo(new ARCTargetCodeGenInfo(Types));
  case llvm::Triple::spir:
  case llvm::Triple::spir64:
    return SetCGInfo(new SPIRTargetCodeGenInfo(Types));
  }
}

/// Create an OpenCL kernel for an enqueued block.
///
/// The kernel has the same function type as the block invoke function. Its
/// name is the name of the block invoke function postfixed with "_kernel".
/// It simply calls the block invoke function then returns.
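///
/// For example (illustrative): a block invoke function named
/// "__foo_block_invoke" yields a kernel named "__foo_block_invoke_kernel"
/// whose body is a single call to the invoke function followed by a void
/// return.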
llvm::Function *
TargetCodeGenInfo::createEnqueuedBlockKernel(CodeGenFunction &CGF,
                                             llvm::Function *Invoke,
                                             llvm::Value *BlockLiteral) const {
  auto *InvokeFT = Invoke->getFunctionType();
  llvm::SmallVector<llvm::Type *, 2> ArgTys;
  for (auto &P : InvokeFT->params())
    ArgTys.push_back(P);
  auto &C = CGF.getLLVMContext();
  std::string Name = Invoke->getName().str() + "_kernel";
  auto *FT = llvm::FunctionType::get(llvm::Type::getVoidTy(C), ArgTys, false);
  auto *F = llvm::Function::Create(FT, llvm::GlobalValue::InternalLinkage, Name,
                                   &CGF.CGM.getModule());
  auto IP = CGF.Builder.saveIP();
  auto *BB = llvm::BasicBlock::Create(C, "entry", F);
  auto &Builder = CGF.Builder;
  Builder.SetInsertPoint(BB);
  llvm::SmallVector<llvm::Value *, 2> Args;
  for (auto &A : F->args())
    Args.push_back(&A);
  Builder.CreateCall(Invoke, Args);
  Builder.CreateRetVoid();
  Builder.restoreIP(IP);
  return F;
}
9996
/// Create an OpenCL kernel for an enqueued block.
///
/// Unlike the generic version above, the type of the first argument (the
/// block literal) is the block literal's struct type itself rather than a
/// pointer type, so the block literal is passed to the kernel directly by
/// value. The kernel allocates a struct of the same type on the stack,
/// stores the block literal into it, and passes the struct's address to the
/// block invoke function. The kernel carries the "enqueued-block" function
/// attribute and OpenCL kernel argument metadata.
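///
/// A rough sketch of the result (names, types, and address spaces are
/// illustrative; the actual ones come from the invoke function):
///
/// \code
///   define internal void @block_invoke_kernel(%block.ty %bl,
///                                             i8 addrspace(3)* %larg1) {
///   entry:
///     %tmp = alloca %block.ty
///     store %block.ty %bl, %block.ty* %tmp
///     %ptr = addrspacecast %block.ty* %tmp to i8 addrspace(4)*
///     call void @block_invoke(i8 addrspace(4)* %ptr, i8 addrspace(3)* %larg1)
///     ret void
///   }
/// \endcode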
llvm::Function *AMDGPUTargetCodeGenInfo::createEnqueuedBlockKernel(
    CodeGenFunction &CGF, llvm::Function *Invoke,
    llvm::Value *BlockLiteral) const {
  auto &Builder = CGF.Builder;
  auto &C = CGF.getLLVMContext();

  auto *BlockTy = BlockLiteral->getType()->getPointerElementType();
  auto *InvokeFT = Invoke->getFunctionType();
  llvm::SmallVector<llvm::Type *, 2> ArgTys;
  llvm::SmallVector<llvm::Metadata *, 8> AddressQuals;
  llvm::SmallVector<llvm::Metadata *, 8> AccessQuals;
  llvm::SmallVector<llvm::Metadata *, 8> ArgTypeNames;
  llvm::SmallVector<llvm::Metadata *, 8> ArgBaseTypeNames;
  llvm::SmallVector<llvm::Metadata *, 8> ArgTypeQuals;
  llvm::SmallVector<llvm::Metadata *, 8> ArgNames;

  ArgTys.push_back(BlockTy);
  ArgTypeNames.push_back(llvm::MDString::get(C, "__block_literal"));
  AddressQuals.push_back(llvm::ConstantAsMetadata::get(Builder.getInt32(0)));
  ArgBaseTypeNames.push_back(llvm::MDString::get(C, "__block_literal"));
  ArgTypeQuals.push_back(llvm::MDString::get(C, ""));
  AccessQuals.push_back(llvm::MDString::get(C, "none"));
  ArgNames.push_back(llvm::MDString::get(C, "block_literal"));
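  // Each remaining invoke-function parameter becomes a kernel argument: a
  // pointer to a local (address space 3) buffer whose size is supplied when
  // the block is enqueued.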
  for (unsigned I = 1, E = InvokeFT->getNumParams(); I < E; ++I) {
    ArgTys.push_back(InvokeFT->getParamType(I));
    ArgTypeNames.push_back(llvm::MDString::get(C, "void*"));
    AddressQuals.push_back(llvm::ConstantAsMetadata::get(Builder.getInt32(3)));
    AccessQuals.push_back(llvm::MDString::get(C, "none"));
    ArgBaseTypeNames.push_back(llvm::MDString::get(C, "void*"));
    ArgTypeQuals.push_back(llvm::MDString::get(C, ""));
    ArgNames.push_back(
        llvm::MDString::get(C, (Twine("local_arg") + Twine(I)).str()));
  }
  std::string Name = Invoke->getName().str() + "_kernel";
  auto *FT = llvm::FunctionType::get(llvm::Type::getVoidTy(C), ArgTys, false);
  auto *F = llvm::Function::Create(FT, llvm::GlobalValue::InternalLinkage, Name,
                                   &CGF.CGM.getModule());
  F->addFnAttr("enqueued-block");
  auto IP = CGF.Builder.saveIP();
  auto *BB = llvm::BasicBlock::Create(C, "entry", F);
  Builder.SetInsertPoint(BB);
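  // The block literal arrives by value, so spill it to a stack slot and pass
  // the slot's address to the invoke function, which expects a pointer.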
  unsigned BlockAlign = CGF.CGM.getDataLayout().getPrefTypeAlignment(BlockTy);
  auto *BlockPtr = Builder.CreateAlloca(BlockTy, nullptr);
  BlockPtr->setAlignment(llvm::MaybeAlign(BlockAlign));
  Builder.CreateAlignedStore(F->arg_begin(), BlockPtr, BlockAlign);
  auto *Cast = Builder.CreatePointerCast(BlockPtr, InvokeFT->getParamType(0));
  llvm::SmallVector<llvm::Value *, 2> Args;
  Args.push_back(Cast);
  for (auto I = F->arg_begin() + 1, E = F->arg_end(); I != E; ++I)
    Args.push_back(I);
  Builder.CreateCall(Invoke, Args);
  Builder.CreateRetVoid();
  Builder.restoreIP(IP);

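  // Attach OpenCL kernel-argument metadata describing the synthesized
  // arguments so the runtime can introspect them.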
  F->setMetadata("kernel_arg_addr_space", llvm::MDNode::get(C, AddressQuals));
  F->setMetadata("kernel_arg_access_qual", llvm::MDNode::get(C, AccessQuals));
  F->setMetadata("kernel_arg_type", llvm::MDNode::get(C, ArgTypeNames));
  F->setMetadata("kernel_arg_base_type",
                 llvm::MDNode::get(C, ArgBaseTypeNames));
  F->setMetadata("kernel_arg_type_qual", llvm::MDNode::get(C, ArgTypeQuals));
  if (CGF.CGM.getCodeGenOpts().EmitOpenCLArgMetadata)
    F->setMetadata("kernel_arg_name", llvm::MDNode::get(C, ArgNames));

  return F;
}
