//===---- TargetInfo.cpp - Encapsulate target details -----------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// These classes wrap the information about a call or function
// definition used to handle ABI compliance.
//
//===----------------------------------------------------------------------===//

#include "TargetInfo.h"
#include "ABIInfo.h"
#include "CGBlocks.h"
#include "CGCXXABI.h"
#include "CGValue.h"
#include "CodeGenFunction.h"
#include "clang/AST/Attr.h"
#include "clang/AST/RecordLayout.h"
#include "clang/Basic/CodeGenOptions.h"
#include "clang/Basic/DiagnosticFrontend.h"
#include "clang/CodeGen/CGFunctionInfo.h"
#include "clang/CodeGen/SwiftCallingConv.h"
#include "llvm/ADT/SmallBitVector.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/ADT/StringSwitch.h"
#include "llvm/ADT/Triple.h"
#include "llvm/ADT/Twine.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/IntrinsicsNVPTX.h"
#include "llvm/IR/Type.h"
#include "llvm/Support/raw_ostream.h"
#include <algorithm> // std::sort

using namespace clang;
using namespace CodeGen;

// Helper for coercing an aggregate argument or return value into an integer
// array of the same size (including padding) and alignment. This alternate
// coercion happens only for the RenderScript ABI and can be removed after
// runtimes that rely on it are no longer supported.
//
// RenderScript assumes that the size of the argument / return value in the IR
// is the same as the size of the corresponding qualified type. This helper
// coerces the aggregate type into an array of the same size (including
// padding). This coercion is used in lieu of expansion of struct members or
// other canonical coercions that return a coerced-type of larger size.
//
// Ty          - The argument / return value type
// Context     - The associated ASTContext
// LLVMContext - The associated LLVMContext
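//
// For example, a 12-byte struct with 4-byte alignment (Size = 96 bits,
// Alignment = 32 bits) is coerced to [3 x i32].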
static ABIArgInfo coerceToIntArray(QualType Ty,
                                   ASTContext &Context,
                                   llvm::LLVMContext &LLVMContext) {
  // Alignment and Size are measured in bits.
  const uint64_t Size = Context.getTypeSize(Ty);
  const uint64_t Alignment = Context.getTypeAlign(Ty);
  llvm::Type *IntType = llvm::Type::getIntNTy(LLVMContext, Alignment);
  const uint64_t NumElements = (Size + Alignment - 1) / Alignment;
  return ABIArgInfo::getDirect(llvm::ArrayType::get(IntType, NumElements));
}

static void AssignToArrayRange(CodeGen::CGBuilderTy &Builder,
                               llvm::Value *Array,
                               llvm::Value *Value,
                               unsigned FirstIndex,
                               unsigned LastIndex) {
  // Alternatively, we could emit this as a loop in the source.
  for (unsigned I = FirstIndex; I <= LastIndex; ++I) {
    llvm::Value *Cell =
        Builder.CreateConstInBoundsGEP1_32(Builder.getInt8Ty(), Array, I);
    Builder.CreateAlignedStore(Value, Cell, CharUnits::One());
  }
}

static bool isAggregateTypeForABI(QualType T) {
  return !CodeGenFunction::hasScalarEvaluationKind(T) ||
         T->isMemberFunctionPointerType();
}

ABIArgInfo ABIInfo::getNaturalAlignIndirect(QualType Ty, bool ByVal,
                                            bool Realign,
                                            llvm::Type *Padding) const {
  return ABIArgInfo::getIndirect(getContext().getTypeAlignInChars(Ty), ByVal,
                                 Realign, Padding);
}

ABIArgInfo
ABIInfo::getNaturalAlignIndirectInReg(QualType Ty, bool Realign) const {
  return ABIArgInfo::getIndirectInReg(getContext().getTypeAlignInChars(Ty),
                                      /*ByVal*/ false, Realign);
}

Address ABIInfo::EmitMSVAArg(CodeGenFunction &CGF, Address VAListAddr,
                             QualType Ty) const {
  return Address::invalid();
}

bool ABIInfo::isPromotableIntegerTypeForABI(QualType Ty) const {
  if (Ty->isPromotableIntegerType())
    return true;

  if (const auto *EIT = Ty->getAs<ExtIntType>())
    if (EIT->getNumBits() < getContext().getTypeSize(getContext().IntTy))
      return true;

  return false;
}

ABIInfo::~ABIInfo() {}

/// Does the given lowering require more than the given number of
/// registers when expanded?
///
/// This is intended to be the basis of a reasonable basic implementation
/// of should{Pass,Return}IndirectlyForSwift.
///
/// For most targets, a limit of four total registers is reasonable; this
/// limits the amount of code required in order to move around the value
/// in case it wasn't produced immediately prior to the call by the caller
/// (or wasn't produced in exactly the right registers) or isn't used
/// immediately within the callee. But some targets may need to further
/// limit the register count due to an inability to support that many
/// return registers.
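///
/// For example, on a target with 64-bit pointers, {i8*, i128, float}
/// counts as three integer registers (one for the pointer, two for the
/// i128) plus one FP register, so it occupies more than a limit of three
/// registers but not a limit of four.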
static bool occupiesMoreThan(CodeGenTypes &cgt,
                             ArrayRef<llvm::Type*> scalarTypes,
                             unsigned maxAllRegisters) {
  unsigned intCount = 0, fpCount = 0;
  for (llvm::Type *type : scalarTypes) {
    if (type->isPointerTy()) {
      intCount++;
    } else if (auto intTy = dyn_cast<llvm::IntegerType>(type)) {
      auto ptrWidth = cgt.getTarget().getPointerWidth(0);
      intCount += (intTy->getBitWidth() + ptrWidth - 1) / ptrWidth;
    } else {
      assert(type->isVectorTy() || type->isFloatingPointTy());
      fpCount++;
    }
  }

  return (intCount + fpCount > maxAllRegisters);
}

bool SwiftABIInfo::isLegalVectorTypeForSwift(CharUnits vectorSize,
                                             llvm::Type *eltTy,
                                             unsigned numElts) const {
  // The default implementation of this assumes that the target guarantees
  // 128-bit SIMD support but nothing more.
  return (vectorSize.getQuantity() > 8 && vectorSize.getQuantity() <= 16);
}

static CGCXXABI::RecordArgABI getRecordArgABI(const RecordType *RT,
                                              CGCXXABI &CXXABI) {
  const CXXRecordDecl *RD = dyn_cast<CXXRecordDecl>(RT->getDecl());
  if (!RD) {
    if (!RT->getDecl()->canPassInRegisters())
      return CGCXXABI::RAA_Indirect;
    return CGCXXABI::RAA_Default;
  }
  return CXXABI.getRecordArgABI(RD);
}

static CGCXXABI::RecordArgABI getRecordArgABI(QualType T,
                                              CGCXXABI &CXXABI) {
  const RecordType *RT = T->getAs<RecordType>();
  if (!RT)
    return CGCXXABI::RAA_Default;
  return getRecordArgABI(RT, CXXABI);
}

static bool classifyReturnType(const CGCXXABI &CXXABI, CGFunctionInfo &FI,
                               const ABIInfo &Info) {
  QualType Ty = FI.getReturnType();

  if (const auto *RT = Ty->getAs<RecordType>())
    if (!isa<CXXRecordDecl>(RT->getDecl()) &&
        !RT->getDecl()->canPassInRegisters()) {
      FI.getReturnInfo() = Info.getNaturalAlignIndirect(Ty);
      return true;
    }

  return CXXABI.classifyReturnType(FI);
}

/// Pass transparent unions as if they were the type of the first element. Sema
/// should ensure that all elements of the union have the same "machine type".
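///
/// For example (illustrative):
///   typedef union __attribute__((__transparent_union__)) {
///     int *ip;
///     float *fp;
///   } arg_t;
/// An argument of type arg_t is passed as if it had the type of its first
/// field, int *.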
static QualType useFirstFieldIfTransparentUnion(QualType Ty) {
  if (const RecordType *UT = Ty->getAsUnionType()) {
    const RecordDecl *UD = UT->getDecl();
    if (UD->hasAttr<TransparentUnionAttr>()) {
      assert(!UD->field_empty() && "sema created an empty transparent union");
      return UD->field_begin()->getType();
    }
  }
  return Ty;
}

CGCXXABI &ABIInfo::getCXXABI() const {
  return CGT.getCXXABI();
}

ASTContext &ABIInfo::getContext() const {
  return CGT.getContext();
}

llvm::LLVMContext &ABIInfo::getVMContext() const {
  return CGT.getLLVMContext();
}

const llvm::DataLayout &ABIInfo::getDataLayout() const {
  return CGT.getDataLayout();
}

const TargetInfo &ABIInfo::getTarget() const {
  return CGT.getTarget();
}

const CodeGenOptions &ABIInfo::getCodeGenOpts() const {
  return CGT.getCodeGenOpts();
}

bool ABIInfo::isAndroid() const { return getTarget().getTriple().isAndroid(); }

bool ABIInfo::isHomogeneousAggregateBaseType(QualType Ty) const {
  return false;
}

bool ABIInfo::isHomogeneousAggregateSmallEnough(const Type *Base,
                                                uint64_t Members) const {
  return false;
}

LLVM_DUMP_METHOD void ABIArgInfo::dump() const {
  raw_ostream &OS = llvm::errs();
  OS << "(ABIArgInfo Kind=";
  switch (TheKind) {
  case Direct:
    OS << "Direct Type=";
    if (llvm::Type *Ty = getCoerceToType())
      Ty->print(OS);
    else
      OS << "null";
    break;
  case Extend:
    OS << "Extend";
    break;
  case Ignore:
    OS << "Ignore";
    break;
  case InAlloca:
    OS << "InAlloca Offset=" << getInAllocaFieldIndex();
    break;
  case Indirect:
    OS << "Indirect Align=" << getIndirectAlign().getQuantity()
       << " ByVal=" << getIndirectByVal()
       << " Realign=" << getIndirectRealign();
    break;
  case IndirectAliased:
    OS << "Indirect Align=" << getIndirectAlign().getQuantity()
       << " AddrSpace=" << getIndirectAddrSpace()
       << " Realign=" << getIndirectRealign();
    break;
  case Expand:
    OS << "Expand";
    break;
  case CoerceAndExpand:
    OS << "CoerceAndExpand Type=";
    getCoerceAndExpandType()->print(OS);
    break;
  }
  OS << ")\n";
}

// Dynamically round a pointer up to a multiple of the given alignment.
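// For example, with Align == 8 a pointer value of 0x1003 becomes 0x1008:
// (0x1003 + 7) & ~7 == 0x1008.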
static llvm::Value *emitRoundPointerUpToAlignment(CodeGenFunction &CGF,
                                                  llvm::Value *Ptr,
                                                  CharUnits Align) {
  llvm::Value *PtrAsInt = Ptr;
  // OverflowArgArea = (OverflowArgArea + Align - 1) & -Align;
  PtrAsInt = CGF.Builder.CreatePtrToInt(PtrAsInt, CGF.IntPtrTy);
  PtrAsInt = CGF.Builder.CreateAdd(PtrAsInt,
        llvm::ConstantInt::get(CGF.IntPtrTy, Align.getQuantity() - 1));
  PtrAsInt = CGF.Builder.CreateAnd(PtrAsInt,
        llvm::ConstantInt::get(CGF.IntPtrTy, -Align.getQuantity()));
  PtrAsInt = CGF.Builder.CreateIntToPtr(PtrAsInt,
                                        Ptr->getType(),
                                        Ptr->getName() + ".aligned");
  return PtrAsInt;
}

/// Emit va_arg for a platform using the common void* representation,
/// where arguments are simply emitted in an array of slots on the stack.
///
/// This version implements the core direct-value passing rules.
///
/// \param SlotSize - The size and alignment of a stack slot.
///   Each argument will be allocated to a multiple of this number of
///   slots, and all the slots will be aligned to this value.
/// \param AllowHigherAlign - The slot alignment is not a cap;
///   an argument type with an alignment greater than the slot size
///   will be emitted on a higher-alignment address, potentially
///   leaving one or more empty slots behind as padding. If this
///   is false, the returned address might be less-aligned than
///   DirectAlign.
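///
/// For example, with 4-byte slots, a 6-byte argument occupies two slots
/// (8 bytes), so "argp.next" is advanced 8 bytes past the current argument.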
static Address emitVoidPtrDirectVAArg(CodeGenFunction &CGF,
                                      Address VAListAddr,
                                      llvm::Type *DirectTy,
                                      CharUnits DirectSize,
                                      CharUnits DirectAlign,
                                      CharUnits SlotSize,
                                      bool AllowHigherAlign) {
  // Cast the element type to i8* if necessary. Some platforms define
  // va_list as a struct containing an i8* instead of just an i8*.
  if (VAListAddr.getElementType() != CGF.Int8PtrTy)
    VAListAddr = CGF.Builder.CreateElementBitCast(VAListAddr, CGF.Int8PtrTy);

  llvm::Value *Ptr = CGF.Builder.CreateLoad(VAListAddr, "argp.cur");

  // If the CC aligns values higher than the slot size, do so if needed.
  Address Addr = Address::invalid();
  if (AllowHigherAlign && DirectAlign > SlotSize) {
    Addr = Address(emitRoundPointerUpToAlignment(CGF, Ptr, DirectAlign),
                   DirectAlign);
  } else {
    Addr = Address(Ptr, SlotSize);
  }

  // Advance the pointer past the argument, then store that back.
  CharUnits FullDirectSize = DirectSize.alignTo(SlotSize);
  Address NextPtr =
      CGF.Builder.CreateConstInBoundsByteGEP(Addr, FullDirectSize, "argp.next");
  CGF.Builder.CreateStore(NextPtr.getPointer(), VAListAddr);

  // If the argument is smaller than a slot, and this is a big-endian
  // target, the argument will be right-adjusted in its slot.
  if (DirectSize < SlotSize && CGF.CGM.getDataLayout().isBigEndian() &&
      !DirectTy->isStructTy()) {
    Addr = CGF.Builder.CreateConstInBoundsByteGEP(Addr, SlotSize - DirectSize);
  }

  Addr = CGF.Builder.CreateElementBitCast(Addr, DirectTy);
  return Addr;
}

/// Emit va_arg for a platform using the common void* representation,
/// where arguments are simply emitted in an array of slots on the stack.
///
/// \param IsIndirect - Values of this type are passed indirectly.
/// \param ValueInfo - The size and alignment of this type, generally
///   computed with getContext().getTypeInfoInChars(ValueTy).
/// \param SlotSizeAndAlign - The size and alignment of a stack slot.
///   Each argument will be allocated to a multiple of this number of
///   slots, and all the slots will be aligned to this value.
/// \param AllowHigherAlign - The slot alignment is not a cap;
///   an argument type with an alignment greater than the slot size
///   will be emitted on a higher-alignment address, potentially
///   leaving one or more empty slots behind as padding.
static Address emitVoidPtrVAArg(CodeGenFunction &CGF, Address VAListAddr,
                                QualType ValueTy, bool IsIndirect,
                                TypeInfoChars ValueInfo,
                                CharUnits SlotSizeAndAlign,
                                bool AllowHigherAlign) {
  // The size and alignment of the value that was passed directly.
  CharUnits DirectSize, DirectAlign;
  if (IsIndirect) {
    DirectSize = CGF.getPointerSize();
    DirectAlign = CGF.getPointerAlign();
  } else {
    DirectSize = ValueInfo.Width;
    DirectAlign = ValueInfo.Align;
  }

  // Cast the address we've calculated to the right type.
  llvm::Type *DirectTy = CGF.ConvertTypeForMem(ValueTy);
  if (IsIndirect)
    DirectTy = DirectTy->getPointerTo(0);

  Address Addr = emitVoidPtrDirectVAArg(CGF, VAListAddr, DirectTy,
                                        DirectSize, DirectAlign,
                                        SlotSizeAndAlign,
                                        AllowHigherAlign);

  if (IsIndirect) {
    Addr = Address(CGF.Builder.CreateLoad(Addr), ValueInfo.Align);
  }

  return Addr;
}

static Address emitMergePHI(CodeGenFunction &CGF,
                            Address Addr1, llvm::BasicBlock *Block1,
                            Address Addr2, llvm::BasicBlock *Block2,
                            const llvm::Twine &Name = "") {
  assert(Addr1.getType() == Addr2.getType());
  llvm::PHINode *PHI = CGF.Builder.CreatePHI(Addr1.getType(), 2, Name);
  PHI->addIncoming(Addr1.getPointer(), Block1);
  PHI->addIncoming(Addr2.getPointer(), Block2);
  CharUnits Align = std::min(Addr1.getAlignment(), Addr2.getAlignment());
  return Address(PHI, Align);
}

TargetCodeGenInfo::~TargetCodeGenInfo() = default;

// If someone can figure out a general rule for this, that would be great.
// It's probably just doomed to be platform-dependent, though.
unsigned TargetCodeGenInfo::getSizeOfUnwindException() const {
  // Verified for:
  //   x86-64 FreeBSD, Linux, Darwin
  //   x86-32 FreeBSD, Linux, Darwin
  //   PowerPC Linux, Darwin
  //   ARM Darwin (*not* EABI)
  //   AArch64 Linux
  return 32;
}

bool TargetCodeGenInfo::isNoProtoCallVariadic(const CallArgList &args,
                                       const FunctionNoProtoType *fnType) const {
  // The following conventions are known to require this to be false:
  //   x86_stdcall
  //   MIPS
  // For everything else, we just prefer false unless we opt out.
  return false;
}

void
TargetCodeGenInfo::getDependentLibraryOption(llvm::StringRef Lib,
                                             llvm::SmallString<24> &Opt) const {
  // This assumes the user is passing a library name like "rt" instead of a
  // filename like "librt.a/so", and that they don't care whether it's static or
  // dynamic.
  Opt = "-l";
  Opt += Lib;
}

unsigned TargetCodeGenInfo::getOpenCLKernelCallingConv() const {
  // OpenCL kernels are called via an explicit runtime API with arguments
  // set with clSetKernelArg(), not as normal sub-functions.
  // Return SPIR_KERNEL by default as the kernel calling convention to
  // ensure the fingerprint is fixed in such a way that each OpenCL argument
  // gets one matching argument in the produced kernel function argument
  // list, enabling a feasible implementation of clSetKernelArg() with
  // aggregates etc. If we used the default C calling convention here,
  // clSetKernelArg() might break, depending on the target-specific
  // conventions; different targets might split structs passed as values
  // into multiple function arguments etc.
  return llvm::CallingConv::SPIR_KERNEL;
}

llvm::Constant *TargetCodeGenInfo::getNullPointer(const CodeGen::CodeGenModule &CGM,
    llvm::PointerType *T, QualType QT) const {
  return llvm::ConstantPointerNull::get(T);
}

LangAS TargetCodeGenInfo::getGlobalVarAddressSpace(CodeGenModule &CGM,
                                                   const VarDecl *D) const {
  assert(!CGM.getLangOpts().OpenCL &&
         !(CGM.getLangOpts().CUDA && CGM.getLangOpts().CUDAIsDevice) &&
         "Address space agnostic languages only");
  return D ? D->getType().getAddressSpace() : LangAS::Default;
}

llvm::Value *TargetCodeGenInfo::performAddrSpaceCast(
    CodeGen::CodeGenFunction &CGF, llvm::Value *Src, LangAS SrcAddr,
    LangAS DestAddr, llvm::Type *DestTy, bool isNonNull) const {
  // Since the target may map different address spaces in AST to the same
  // address space, an address space conversion may end up as a bitcast.
  if (auto *C = dyn_cast<llvm::Constant>(Src))
    return performAddrSpaceCast(CGF.CGM, C, SrcAddr, DestAddr, DestTy);
  // Try to preserve the source's name to make IR more readable.
  return CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
      Src, DestTy, Src->hasName() ? Src->getName() + ".ascast" : "");
}

llvm::Constant *
TargetCodeGenInfo::performAddrSpaceCast(CodeGenModule &CGM, llvm::Constant *Src,
                                        LangAS SrcAddr, LangAS DestAddr,
                                        llvm::Type *DestTy) const {
  // Since the target may map different address spaces in AST to the same
  // address space, an address space conversion may end up as a bitcast.
  return llvm::ConstantExpr::getPointerCast(Src, DestTy);
}

llvm::SyncScope::ID
TargetCodeGenInfo::getLLVMSyncScopeID(const LangOptions &LangOpts,
                                      SyncScope Scope,
                                      llvm::AtomicOrdering Ordering,
                                      llvm::LLVMContext &Ctx) const {
  return Ctx.getOrInsertSyncScopeID(""); /* default sync scope */
}

static bool isEmptyRecord(ASTContext &Context, QualType T, bool AllowArrays);

/// isEmptyField - Return true iff the field is "empty", that is, it is
/// an unnamed bit-field or an (array of) empty record(s).
static bool isEmptyField(ASTContext &Context, const FieldDecl *FD,
                         bool AllowArrays) {
  if (FD->isUnnamedBitfield())
    return true;

  QualType FT = FD->getType();

  // Constant arrays of empty records count as empty, strip them off.
  // Constant arrays of zero length always count as empty.
  bool WasArray = false;
  if (AllowArrays)
    while (const ConstantArrayType *AT = Context.getAsConstantArrayType(FT)) {
      if (AT->getSize() == 0)
        return true;
      FT = AT->getElementType();
      // The [[no_unique_address]] special case below does not apply to
      // arrays of C++ empty records, so we need to remember this fact.
      WasArray = true;
    }

  const RecordType *RT = FT->getAs<RecordType>();
  if (!RT)
    return false;

  // C++ record fields are never empty, at least in the Itanium ABI.
  //
  // FIXME: We should use a predicate for whether this behavior is true in the
  // current ABI.
  //
  // The exception to the above rule are fields marked with the
  // [[no_unique_address]] attribute (since C++20). Those do count as empty
  // according to the Itanium ABI. The exception applies only to records,
  // not arrays of records, so we must also check whether we stripped off an
  // array type above.
  if (isa<CXXRecordDecl>(RT->getDecl()) &&
      (WasArray || !FD->hasAttr<NoUniqueAddressAttr>()))
    return false;

  return isEmptyRecord(Context, FT, AllowArrays);
}

/// isEmptyRecord - Return true iff a structure contains only empty
/// fields. Note that a structure with a flexible array member is not
/// considered empty.
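///
/// For example, "struct S { int : 16; };" is an empty record: its only
/// field is an unnamed bit-field, which isEmptyField treats as empty.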
static bool isEmptyRecord(ASTContext &Context, QualType T, bool AllowArrays) {
  const RecordType *RT = T->getAs<RecordType>();
  if (!RT)
    return false;
  const RecordDecl *RD = RT->getDecl();
  if (RD->hasFlexibleArrayMember())
    return false;

  // If this is a C++ record, check the bases first.
  if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD))
    for (const auto &I : CXXRD->bases())
      if (!isEmptyRecord(Context, I.getType(), true))
        return false;

  for (const auto *I : RD->fields())
    if (!isEmptyField(Context, I, AllowArrays))
      return false;
  return true;
}

/// isSingleElementStruct - Determine if a structure is a "single
/// element struct", i.e. it has exactly one non-empty field or
/// exactly one field which is itself a single element
/// struct. Structures with flexible array members are never
/// considered single element structs.
///
/// \return The type of the single non-empty field, if it exists.
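///
/// For example, for "struct A { struct { double d; } inner; };" this
/// returns the type "double": A transitively wraps a single double with no
/// additional padding.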
static const Type *isSingleElementStruct(QualType T, ASTContext &Context) {
  const RecordType *RT = T->getAs<RecordType>();
  if (!RT)
    return nullptr;

  const RecordDecl *RD = RT->getDecl();
  if (RD->hasFlexibleArrayMember())
    return nullptr;

  const Type *Found = nullptr;

  // If this is a C++ record, check the bases first.
  if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD)) {
    for (const auto &I : CXXRD->bases()) {
      // Ignore empty records.
      if (isEmptyRecord(Context, I.getType(), true))
        continue;

      // If we already found an element then this isn't a single-element
      // struct.
      if (Found)
        return nullptr;

      // If this is non-empty and not a single element struct, the composite
      // cannot be a single element struct.
      Found = isSingleElementStruct(I.getType(), Context);
      if (!Found)
        return nullptr;
    }
  }

  // Check for single element.
  for (const auto *FD : RD->fields()) {
    QualType FT = FD->getType();

    // Ignore empty fields.
    if (isEmptyField(Context, FD, true))
      continue;

    // If we already found an element then this isn't a single-element
    // struct.
    if (Found)
      return nullptr;

    // Treat single element arrays as the element.
    while (const ConstantArrayType *AT = Context.getAsConstantArrayType(FT)) {
      if (AT->getSize().getZExtValue() != 1)
        break;
      FT = AT->getElementType();
    }

    if (!isAggregateTypeForABI(FT)) {
      Found = FT.getTypePtr();
    } else {
      Found = isSingleElementStruct(FT, Context);
      if (!Found)
        return nullptr;
    }
  }

  // We don't consider a struct a single-element struct if it has
  // padding beyond the element type.
  if (Found && Context.getTypeSize(Found) != Context.getTypeSize(T))
    return nullptr;

  return Found;
}

namespace {
Address EmitVAArgInstr(CodeGenFunction &CGF, Address VAListAddr, QualType Ty,
                       const ABIArgInfo &AI) {
  // This default implementation defers to the llvm backend's va_arg
  // instruction. It can handle only passing arguments directly
  // (typically only handled in the backend for primitive types), or
  // aggregates passed indirectly by pointer (NOTE: if the "byval"
  // flag has ABI impact in the callee, this implementation cannot
  // work.)

  // Only a few cases are covered here at the moment -- those needed
  // by the default ABI.
  llvm::Value *Val;

  if (AI.isIndirect()) {
    assert(!AI.getPaddingType() &&
           "Unexpected PaddingType seen in arginfo in generic VAArg emitter!");
    assert(
        !AI.getIndirectRealign() &&
        "Unexpected IndirectRealign seen in arginfo in generic VAArg emitter!");

    auto TyInfo = CGF.getContext().getTypeInfoInChars(Ty);
    CharUnits TyAlignForABI = TyInfo.Align;

    llvm::Type *BaseTy =
        llvm::PointerType::getUnqual(CGF.ConvertTypeForMem(Ty));
    llvm::Value *Addr =
        CGF.Builder.CreateVAArg(VAListAddr.getPointer(), BaseTy);
    return Address(Addr, TyAlignForABI);
  } else {
    assert((AI.isDirect() || AI.isExtend()) &&
           "Unexpected ArgInfo Kind in generic VAArg emitter!");

    assert(!AI.getInReg() &&
           "Unexpected InReg seen in arginfo in generic VAArg emitter!");
    assert(!AI.getPaddingType() &&
           "Unexpected PaddingType seen in arginfo in generic VAArg emitter!");
    assert(!AI.getDirectOffset() &&
           "Unexpected DirectOffset seen in arginfo in generic VAArg emitter!");
    assert(!AI.getCoerceToType() &&
           "Unexpected CoerceToType seen in arginfo in generic VAArg emitter!");

    Address Temp = CGF.CreateMemTemp(Ty, "varet");
    Val = CGF.Builder.CreateVAArg(VAListAddr.getPointer(), CGF.ConvertType(Ty));
    CGF.Builder.CreateStore(Val, Temp);
    return Temp;
  }
}

/// DefaultABIInfo - The default implementation for ABI specific
/// details. This implementation provides information which results in
/// self-consistent and sensible LLVM IR generation, but does not
/// conform to any particular ABI.
class DefaultABIInfo : public ABIInfo {
public:
  DefaultABIInfo(CodeGen::CodeGenTypes &CGT) : ABIInfo(CGT) {}

  ABIArgInfo classifyReturnType(QualType RetTy) const;
  ABIArgInfo classifyArgumentType(QualType RetTy) const;

  void computeInfo(CGFunctionInfo &FI) const override {
    if (!getCXXABI().classifyReturnType(FI))
      FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
    for (auto &I : FI.arguments())
      I.info = classifyArgumentType(I.type);
  }

  Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
                    QualType Ty) const override {
    return EmitVAArgInstr(CGF, VAListAddr, Ty, classifyArgumentType(Ty));
  }
};

class DefaultTargetCodeGenInfo : public TargetCodeGenInfo {
public:
  DefaultTargetCodeGenInfo(CodeGen::CodeGenTypes &CGT)
      : TargetCodeGenInfo(std::make_unique<DefaultABIInfo>(CGT)) {}
};

ABIArgInfo DefaultABIInfo::classifyArgumentType(QualType Ty) const {
  Ty = useFirstFieldIfTransparentUnion(Ty);

  if (isAggregateTypeForABI(Ty)) {
    // Records with non-trivial destructors/copy-constructors should not be
    // passed by value.
    if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, getCXXABI()))
      return getNaturalAlignIndirect(Ty, RAA == CGCXXABI::RAA_DirectInMemory);

    return getNaturalAlignIndirect(Ty);
  }

  // Treat an enum type as its underlying type.
  if (const EnumType *EnumTy = Ty->getAs<EnumType>())
    Ty = EnumTy->getDecl()->getIntegerType();

  ASTContext &Context = getContext();
  if (const auto *EIT = Ty->getAs<ExtIntType>())
    if (EIT->getNumBits() >
        Context.getTypeSize(Context.getTargetInfo().hasInt128Type()
                                ? Context.Int128Ty
                                : Context.LongLongTy))
      return getNaturalAlignIndirect(Ty);

  return (isPromotableIntegerTypeForABI(Ty) ? ABIArgInfo::getExtend(Ty)
                                            : ABIArgInfo::getDirect());
}

ABIArgInfo DefaultABIInfo::classifyReturnType(QualType RetTy) const {
  if (RetTy->isVoidType())
    return ABIArgInfo::getIgnore();

  if (isAggregateTypeForABI(RetTy))
    return getNaturalAlignIndirect(RetTy);

  // Treat an enum type as its underlying type.
  if (const EnumType *EnumTy = RetTy->getAs<EnumType>())
    RetTy = EnumTy->getDecl()->getIntegerType();

  if (const auto *EIT = RetTy->getAs<ExtIntType>())
    if (EIT->getNumBits() >
        getContext().getTypeSize(getContext().getTargetInfo().hasInt128Type()
                                     ? getContext().Int128Ty
                                     : getContext().LongLongTy))
      return getNaturalAlignIndirect(RetTy);

  return (isPromotableIntegerTypeForABI(RetTy) ? ABIArgInfo::getExtend(RetTy)
                                               : ABIArgInfo::getDirect());
}

//===----------------------------------------------------------------------===//
// WebAssembly ABI Implementation
//
// This is a very simple ABI that relies a lot on DefaultABIInfo.
//===----------------------------------------------------------------------===//

class WebAssemblyABIInfo final : public SwiftABIInfo {
public:
  enum ABIKind {
    MVP = 0,
    ExperimentalMV = 1,
  };

private:
  DefaultABIInfo defaultInfo;
  ABIKind Kind;

public:
  explicit WebAssemblyABIInfo(CodeGen::CodeGenTypes &CGT, ABIKind Kind)
      : SwiftABIInfo(CGT), defaultInfo(CGT), Kind(Kind) {}

private:
  ABIArgInfo classifyReturnType(QualType RetTy) const;
  ABIArgInfo classifyArgumentType(QualType Ty) const;

  // DefaultABIInfo's classifyReturnType and classifyArgumentType are
  // non-virtual, but computeInfo and EmitVAArg are virtual, so we
  // overload them.
  void computeInfo(CGFunctionInfo &FI) const override {
    if (!getCXXABI().classifyReturnType(FI))
      FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
    for (auto &Arg : FI.arguments())
      Arg.info = classifyArgumentType(Arg.type);
  }

  Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
                    QualType Ty) const override;

  bool shouldPassIndirectlyForSwift(ArrayRef<llvm::Type*> scalars,
                                    bool asReturnValue) const override {
    return occupiesMoreThan(CGT, scalars, /*total*/ 4);
  }

  bool isSwiftErrorInRegister() const override {
    return false;
  }
};

class WebAssemblyTargetCodeGenInfo final : public TargetCodeGenInfo {
public:
  explicit WebAssemblyTargetCodeGenInfo(CodeGen::CodeGenTypes &CGT,
                                        WebAssemblyABIInfo::ABIKind K)
      : TargetCodeGenInfo(std::make_unique<WebAssemblyABIInfo>(CGT, K)) {}

  void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
                           CodeGen::CodeGenModule &CGM) const override {
    TargetCodeGenInfo::setTargetAttributes(D, GV, CGM);
    if (const auto *FD = dyn_cast_or_null<FunctionDecl>(D)) {
      if (const auto *Attr = FD->getAttr<WebAssemblyImportModuleAttr>()) {
        llvm::Function *Fn = cast<llvm::Function>(GV);
        llvm::AttrBuilder B;
        B.addAttribute("wasm-import-module", Attr->getImportModule());
        Fn->addAttributes(llvm::AttributeList::FunctionIndex, B);
      }
      if (const auto *Attr = FD->getAttr<WebAssemblyImportNameAttr>()) {
        llvm::Function *Fn = cast<llvm::Function>(GV);
        llvm::AttrBuilder B;
        B.addAttribute("wasm-import-name", Attr->getImportName());
        Fn->addAttributes(llvm::AttributeList::FunctionIndex, B);
      }
      if (const auto *Attr = FD->getAttr<WebAssemblyExportNameAttr>()) {
        llvm::Function *Fn = cast<llvm::Function>(GV);
        llvm::AttrBuilder B;
        B.addAttribute("wasm-export-name", Attr->getExportName());
        Fn->addAttributes(llvm::AttributeList::FunctionIndex, B);
      }
    }

    if (auto *FD = dyn_cast_or_null<FunctionDecl>(D)) {
      llvm::Function *Fn = cast<llvm::Function>(GV);
      if (!FD->doesThisDeclarationHaveABody() && !FD->hasPrototype())
        Fn->addFnAttr("no-prototype");
    }
  }
};

/// Classify argument of given type \p Ty.
ABIArgInfo WebAssemblyABIInfo::classifyArgumentType(QualType Ty) const {
  Ty = useFirstFieldIfTransparentUnion(Ty);

  if (isAggregateTypeForABI(Ty)) {
    // Records with non-trivial destructors/copy-constructors should not be
    // passed by value.
    if (auto RAA = getRecordArgABI(Ty, getCXXABI()))
      return getNaturalAlignIndirect(Ty, RAA == CGCXXABI::RAA_DirectInMemory);
    // Ignore empty structs/unions.
    if (isEmptyRecord(getContext(), Ty, true))
      return ABIArgInfo::getIgnore();
    // Lower single-element structs to just pass a regular value. TODO: We
    // could do reasonable-size multiple-element structs too, using
    // getExpand(), though watch out for things like bitfields.
    if (const Type *SeltTy = isSingleElementStruct(Ty, getContext()))
      return ABIArgInfo::getDirect(CGT.ConvertType(QualType(SeltTy, 0)));
    // For the experimental multivalue ABI, fully expand all other aggregates.
    if (Kind == ABIKind::ExperimentalMV) {
      const RecordType *RT = Ty->getAs<RecordType>();
      assert(RT);
      bool HasBitField = false;
      for (auto *Field : RT->getDecl()->fields()) {
        if (Field->isBitField()) {
          HasBitField = true;
          break;
        }
      }
      if (!HasBitField)
        return ABIArgInfo::getExpand();
    }
  }

  // Otherwise just do the default thing.
  return defaultInfo.classifyArgumentType(Ty);
}

ABIArgInfo WebAssemblyABIInfo::classifyReturnType(QualType RetTy) const {
  if (isAggregateTypeForABI(RetTy)) {
    // Records with non-trivial destructors/copy-constructors should not be
    // returned by value.
    if (!getRecordArgABI(RetTy, getCXXABI())) {
      // Ignore empty structs/unions.
      if (isEmptyRecord(getContext(), RetTy, true))
        return ABIArgInfo::getIgnore();
      // Lower single-element structs to just return a regular value. TODO: We
      // could do reasonable-size multiple-element structs too, using
      // ABIArgInfo::getDirect().
      if (const Type *SeltTy = isSingleElementStruct(RetTy, getContext()))
        return ABIArgInfo::getDirect(CGT.ConvertType(QualType(SeltTy, 0)));
      // For the experimental multivalue ABI, return all other aggregates
      if (Kind == ABIKind::ExperimentalMV)
        return ABIArgInfo::getDirect();
    }
  }

  // Otherwise just do the default thing.
  return defaultInfo.classifyReturnType(RetTy);
}

Address WebAssemblyABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
                                      QualType Ty) const {
  bool IsIndirect = isAggregateTypeForABI(Ty) &&
                    !isEmptyRecord(getContext(), Ty, true) &&
                    !isSingleElementStruct(Ty, getContext());
  return emitVoidPtrVAArg(CGF, VAListAddr, Ty, IsIndirect,
                          getContext().getTypeInfoInChars(Ty),
                          CharUnits::fromQuantity(4),
                          /*AllowHigherAlign=*/true);
}

//===----------------------------------------------------------------------===//
// le32/PNaCl bitcode ABI Implementation
//
// This is a simplified version of the x86_32 ABI. Arguments and return values
// are always passed on the stack.
//===----------------------------------------------------------------------===//

class PNaClABIInfo : public ABIInfo {
public:
  PNaClABIInfo(CodeGen::CodeGenTypes &CGT) : ABIInfo(CGT) {}

  ABIArgInfo classifyReturnType(QualType RetTy) const;
  ABIArgInfo classifyArgumentType(QualType RetTy) const;

  void computeInfo(CGFunctionInfo &FI) const override;
  Address EmitVAArg(CodeGenFunction &CGF,
                    Address VAListAddr, QualType Ty) const override;
};

class PNaClTargetCodeGenInfo : public TargetCodeGenInfo {
public:
  PNaClTargetCodeGenInfo(CodeGen::CodeGenTypes &CGT)
      : TargetCodeGenInfo(std::make_unique<PNaClABIInfo>(CGT)) {}
};

void PNaClABIInfo::computeInfo(CGFunctionInfo &FI) const {
  if (!getCXXABI().classifyReturnType(FI))
    FI.getReturnInfo() = classifyReturnType(FI.getReturnType());

  for (auto &I : FI.arguments())
    I.info = classifyArgumentType(I.type);
}

Address PNaClABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
                                QualType Ty) const {
  // The PNaCl ABI is a bit odd, in that varargs don't use normal
  // function classification. Structs get passed directly for varargs
  // functions, through a rewriting transform in
  // pnacl-llvm/lib/Transforms/NaCl/ExpandVarArgs.cpp, which allows
  // this target to actually support a va_arg instruction with an
  // aggregate type, unlike other targets.
  return EmitVAArgInstr(CGF, VAListAddr, Ty, ABIArgInfo::getDirect());
}

/// Classify argument of given type \p Ty.
ABIArgInfo PNaClABIInfo::classifyArgumentType(QualType Ty) const {
  if (isAggregateTypeForABI(Ty)) {
    if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, getCXXABI()))
      return getNaturalAlignIndirect(Ty, RAA == CGCXXABI::RAA_DirectInMemory);
    return getNaturalAlignIndirect(Ty);
  } else if (const EnumType *EnumTy = Ty->getAs<EnumType>()) {
    // Treat an enum type as its underlying type.
    Ty = EnumTy->getDecl()->getIntegerType();
  } else if (Ty->isFloatingType()) {
    // Floating-point types don't go inreg.
    return ABIArgInfo::getDirect();
  } else if (const auto *EIT = Ty->getAs<ExtIntType>()) {
    // Treat extended integers as integers if <= 64 bits, otherwise pass
    // indirectly.
    if (EIT->getNumBits() > 64)
      return getNaturalAlignIndirect(Ty);
    return ABIArgInfo::getDirect();
  }

  return (isPromotableIntegerTypeForABI(Ty) ? ABIArgInfo::getExtend(Ty)
                                            : ABIArgInfo::getDirect());
}

ABIArgInfo PNaClABIInfo::classifyReturnType(QualType RetTy) const {
  if (RetTy->isVoidType())
    return ABIArgInfo::getIgnore();

  // In the PNaCl ABI we always return records/structures on the stack.
  if (isAggregateTypeForABI(RetTy))
    return getNaturalAlignIndirect(RetTy);

  // Treat extended integers as integers if <= 64 bits, otherwise pass
  // indirectly.
  if (const auto *EIT = RetTy->getAs<ExtIntType>()) {
    if (EIT->getNumBits() > 64)
      return getNaturalAlignIndirect(RetTy);
    return ABIArgInfo::getDirect();
  }

  // Treat an enum type as its underlying type.
  if (const EnumType *EnumTy = RetTy->getAs<EnumType>())
    RetTy = EnumTy->getDecl()->getIntegerType();

  return (isPromotableIntegerTypeForABI(RetTy) ? ABIArgInfo::getExtend(RetTy)
                                               : ABIArgInfo::getDirect());
}

/// IsX86_MMXType - Return true if this is an MMX type.
bool IsX86_MMXType(llvm::Type *IRType) {
  // Return true if the type is an MMX type <2 x i32>, <4 x i16>, or <8 x i8>.
  return IRType->isVectorTy() && IRType->getPrimitiveSizeInBits() == 64 &&
    cast<llvm::VectorType>(IRType)->getElementType()->isIntegerTy() &&
    IRType->getScalarSizeInBits() != 64;
}

static llvm::Type* X86AdjustInlineAsmType(CodeGen::CodeGenFunction &CGF,
                                          StringRef Constraint,
                                          llvm::Type* Ty) {
  bool IsMMXCons = llvm::StringSwitch<bool>(Constraint)
                     .Cases("y", "&y", "^Ym", true)
                     .Default(false);
  if (IsMMXCons && Ty->isVectorTy()) {
    if (cast<llvm::VectorType>(Ty)->getPrimitiveSizeInBits().getFixedSize() !=
        64) {
      // Invalid MMX constraint
      return nullptr;
    }

    return llvm::Type::getX86_MMXTy(CGF.getLLVMContext());
  }

  // No operation needed
  return Ty;
}

/// Returns true if this type can be passed in SSE registers with the
/// X86_VectorCall calling convention. Shared between x86_32 and x86_64.
static bool isX86VectorTypeForVectorCall(ASTContext &Context, QualType Ty) {
  if (const BuiltinType *BT = Ty->getAs<BuiltinType>()) {
    if (BT->isFloatingPoint() && BT->getKind() != BuiltinType::Half) {
      if (BT->getKind() == BuiltinType::LongDouble) {
        if (&Context.getTargetInfo().getLongDoubleFormat() ==
            &llvm::APFloat::x87DoubleExtended())
          return false;
      }
      return true;
    }
  } else if (const VectorType *VT = Ty->getAs<VectorType>()) {
    // vectorcall can pass XMM, YMM, and ZMM vectors. We don't pass SSE1 MMX
    // registers specially.
    unsigned VecSize = Context.getTypeSize(VT);
    if (VecSize == 128 || VecSize == 256 || VecSize == 512)
      return true;
  }
  return false;
}

/// Returns true if this aggregate is small enough to be passed in SSE registers
/// in the X86_VectorCall calling convention. Shared between x86_32 and x86_64.
static bool isX86VectorCallAggregateSmallEnough(uint64_t NumMembers) {
  return NumMembers <= 4;
}

/// Returns a Homogeneous Vector Aggregate ABIArgInfo, used in X86.
static ABIArgInfo getDirectX86Hva(llvm::Type* T = nullptr) {
  auto AI = ABIArgInfo::getDirect(T);
  AI.setInReg(true);
  AI.setCanBeFlattened(false);
  return AI;
}

//===----------------------------------------------------------------------===//
// X86-32 ABI Implementation
//===----------------------------------------------------------------------===//

/// Similar to llvm::CCState, but for Clang.
struct CCState {
  CCState(CGFunctionInfo &FI)
      : IsPreassigned(FI.arg_size()), CC(FI.getCallingConvention()) {}

  llvm::SmallBitVector IsPreassigned;
  unsigned CC = CallingConv::CC_C;
  unsigned FreeRegs = 0;
  unsigned FreeSSERegs = 0;
};

/// X86_32ABIInfo - The X86-32 ABI information.
class X86_32ABIInfo : public SwiftABIInfo {
  enum Class {
    Integer,
    Float
  };

  static const unsigned MinABIStackAlignInBytes = 4;

  bool IsDarwinVectorABI;
  bool IsRetSmallStructInRegABI;
  bool IsWin32StructABI;
  bool IsSoftFloatABI;
  bool IsMCUABI;
  unsigned DefaultNumRegisterParameters;

  static bool isRegisterSize(unsigned Size) {
    return (Size == 8 || Size == 16 || Size == 32 || Size == 64);
  }

  bool isHomogeneousAggregateBaseType(QualType Ty) const override {
    // FIXME: Assumes vectorcall is in use.
    return isX86VectorTypeForVectorCall(getContext(), Ty);
  }

  bool isHomogeneousAggregateSmallEnough(const Type *Ty,
                                         uint64_t NumMembers) const override {
    // FIXME: Assumes vectorcall is in use.
    return isX86VectorCallAggregateSmallEnough(NumMembers);
  }

  bool shouldReturnTypeInRegister(QualType Ty, ASTContext &Context) const;

  /// getIndirectResult - Given a source type \arg Ty, return a suitable result
  /// such that the argument will be passed in memory.
  ABIArgInfo getIndirectResult(QualType Ty, bool ByVal, CCState &State) const;

  ABIArgInfo getIndirectReturnResult(QualType Ty, CCState &State) const;

  /// Return the alignment to use for the given type on the stack.
  unsigned getTypeStackAlignInBytes(QualType Ty, unsigned Align) const;

  Class classify(QualType Ty) const;
  ABIArgInfo classifyReturnType(QualType RetTy, CCState &State) const;
  ABIArgInfo classifyArgumentType(QualType RetTy, CCState &State) const;

  /// Updates the number of available free registers, returns
  /// true if any registers were allocated.
  bool updateFreeRegs(QualType Ty, CCState &State) const;

  bool shouldAggregateUseDirect(QualType Ty, CCState &State, bool &InReg,
                                bool &NeedsPadding) const;
  bool shouldPrimitiveUseInReg(QualType Ty, CCState &State) const;

  bool canExpandIndirectArgument(QualType Ty) const;

  /// Rewrite the function info so that all memory arguments use
  /// inalloca.
  void rewriteWithInAlloca(CGFunctionInfo &FI) const;

  void addFieldToArgStruct(SmallVector<llvm::Type *, 6> &FrameFields,
                           CharUnits &StackOffset, ABIArgInfo &Info,
                           QualType Type) const;
  void runVectorCallFirstPass(CGFunctionInfo &FI, CCState &State) const;

public:

  void computeInfo(CGFunctionInfo &FI) const override;
  Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
                    QualType Ty) const override;

  X86_32ABIInfo(CodeGen::CodeGenTypes &CGT, bool DarwinVectorABI,
                bool RetSmallStructInRegABI, bool Win32StructABI,
                unsigned NumRegisterParameters, bool SoftFloatABI)
      : SwiftABIInfo(CGT), IsDarwinVectorABI(DarwinVectorABI),
        IsRetSmallStructInRegABI(RetSmallStructInRegABI),
        IsWin32StructABI(Win32StructABI),
        IsSoftFloatABI(SoftFloatABI),
        IsMCUABI(CGT.getTarget().getTriple().isOSIAMCU()),
        DefaultNumRegisterParameters(NumRegisterParameters) {}

  bool shouldPassIndirectlyForSwift(ArrayRef<llvm::Type*> scalars,
                                    bool asReturnValue) const override {
    // LLVM's x86-32 lowering currently only assigns up to three
    // integer registers and three fp registers. Oddly, it'll use up to
    // four vector registers for vectors, but those can overlap with the
    // scalar registers.
    return occupiesMoreThan(CGT, scalars, /*total*/ 3);
  }

  bool isSwiftErrorInRegister() const override {
    // x86-32 lowering does not support passing swifterror in a register.
    return false;
  }
};

class X86_32TargetCodeGenInfo : public TargetCodeGenInfo {
public:
  X86_32TargetCodeGenInfo(CodeGen::CodeGenTypes &CGT, bool DarwinVectorABI,
                          bool RetSmallStructInRegABI, bool Win32StructABI,
                          unsigned NumRegisterParameters, bool SoftFloatABI)
      : TargetCodeGenInfo(std::make_unique<X86_32ABIInfo>(
            CGT, DarwinVectorABI, RetSmallStructInRegABI, Win32StructABI,
            NumRegisterParameters, SoftFloatABI)) {}

  static bool isStructReturnInRegABI(
      const llvm::Triple &Triple, const CodeGenOptions &Opts);

  void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
                           CodeGen::CodeGenModule &CGM) const override;

  int getDwarfEHStackPointer(CodeGen::CodeGenModule &CGM) const override {
    // Darwin uses different dwarf register numbers for EH.
    if (CGM.getTarget().getTriple().isOSDarwin()) return 5;
    return 4;
  }

  bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
                               llvm::Value *Address) const override;

  llvm::Type* adjustInlineAsmType(CodeGen::CodeGenFunction &CGF,
                                  StringRef Constraint,
                                  llvm::Type* Ty) const override {
    return X86AdjustInlineAsmType(CGF, Constraint, Ty);
  }

  void addReturnRegisterOutputs(CodeGenFunction &CGF, LValue ReturnValue,
                                std::string &Constraints,
                                std::vector<llvm::Type *> &ResultRegTypes,
                                std::vector<llvm::Type *> &ResultTruncRegTypes,
                                std::vector<LValue> &ResultRegDests,
                                std::string &AsmString,
                                unsigned NumOutputs) const override;

  llvm::Constant *
  getUBSanFunctionSignature(CodeGen::CodeGenModule &CGM) const override {
    unsigned Sig = (0xeb << 0) | // jmp rel8
                   (0x06 << 8) | // .+0x08
                   ('v' << 16) |
                   ('2' << 24);
    return llvm::ConstantInt::get(CGM.Int32Ty, Sig);
  }

  StringRef getARCRetainAutoreleasedReturnValueMarker() const override {
    return "movl\t%ebp, %ebp"
           "\t\t// marker for objc_retainAutoreleaseReturnValue";
  }
};

}

/// Rewrite input constraint references after adding some output constraints.
/// In the case where there is one output and one input and we add one output,
/// we need to replace all operand references greater than or equal to 1:
///     mov $0, $1
///     mov eax, $1
/// The result will be:
///     mov $0, $2
///     mov eax, $2
static void rewriteInputConstraintReferences(unsigned FirstIn,
                                             unsigned NumNewOuts,
                                             std::string &AsmString) {
  std::string Buf;
  llvm::raw_string_ostream OS(Buf);
  size_t Pos = 0;
  while (Pos < AsmString.size()) {
    size_t DollarStart = AsmString.find('$', Pos);
    if (DollarStart == std::string::npos)
      DollarStart = AsmString.size();
    size_t DollarEnd = AsmString.find_first_not_of('$', DollarStart);
    if (DollarEnd == std::string::npos)
      DollarEnd = AsmString.size();
    OS << StringRef(&AsmString[Pos], DollarEnd - Pos);
    Pos = DollarEnd;
    size_t NumDollars = DollarEnd - DollarStart;
    if (NumDollars % 2 != 0 && Pos < AsmString.size()) {
      // We have an operand reference.
      size_t DigitStart = Pos;
      if (AsmString[DigitStart] == '{') {
        OS << '{';
        ++DigitStart;
      }
      size_t DigitEnd = AsmString.find_first_not_of("0123456789", DigitStart);
      if (DigitEnd == std::string::npos)
        DigitEnd = AsmString.size();
      StringRef OperandStr(&AsmString[DigitStart], DigitEnd - DigitStart);
      unsigned OperandIndex;
      if (!OperandStr.getAsInteger(10, OperandIndex)) {
        if (OperandIndex >= FirstIn)
          OperandIndex += NumNewOuts;
        OS << OperandIndex;
      } else {
        OS << OperandStr;
      }
      Pos = DigitEnd;
    }
  }
  AsmString = std::move(OS.str());
}

/// Add output constraints for EAX:EDX because they are return registers.
void X86_32TargetCodeGenInfo::addReturnRegisterOutputs(
    CodeGenFunction &CGF, LValue ReturnSlot, std::string &Constraints,
    std::vector<llvm::Type *> &ResultRegTypes,
    std::vector<llvm::Type *> &ResultTruncRegTypes,
    std::vector<LValue> &ResultRegDests, std::string &AsmString,
    unsigned NumOutputs) const {
  uint64_t RetWidth = CGF.getContext().getTypeSize(ReturnSlot.getType());

  // Use the EAX constraint if the width is 32 or smaller and EAX:EDX if it is
  // larger.
  if (!Constraints.empty())
    Constraints += ',';
  if (RetWidth <= 32) {
    Constraints += "={eax}";
    ResultRegTypes.push_back(CGF.Int32Ty);
  } else {
    // Use the 'A' constraint for EAX:EDX.
    Constraints += "=A";
    ResultRegTypes.push_back(CGF.Int64Ty);
  }

  // Truncate EAX or EAX:EDX to an integer of the appropriate size.
  llvm::Type *CoerceTy = llvm::IntegerType::get(CGF.getLLVMContext(), RetWidth);
  ResultTruncRegTypes.push_back(CoerceTy);

  // Coerce the integer by bitcasting the return slot pointer.
  ReturnSlot.setAddress(CGF.Builder.CreateBitCast(ReturnSlot.getAddress(CGF),
                                                  CoerceTy->getPointerTo()));
  ResultRegDests.push_back(ReturnSlot);

  rewriteInputConstraintReferences(NumOutputs, 1, AsmString);
}

/// shouldReturnTypeInRegister - Determine if the given type should be
/// returned in a register (for the Darwin and MCU ABI).
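///
/// For example, "struct { short a, b; }" is 32 bits wide and both fields
/// pass the recursive check below, so it is returned in a register.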
shouldReturnTypeInRegister(QualType Ty,ASTContext & Context) const1328 bool X86_32ABIInfo::shouldReturnTypeInRegister(QualType Ty,
1329 ASTContext &Context) const {
1330 uint64_t Size = Context.getTypeSize(Ty);
1331
1332 // For i386, type must be register sized.
1333 // For the MCU ABI, it only needs to be <= 8-byte
1334 if ((IsMCUABI && Size > 64) || (!IsMCUABI && !isRegisterSize(Size)))
1335 return false;
1336
1337 if (Ty->isVectorType()) {
1338 // 64- and 128- bit vectors inside structures are not returned in
1339 // registers.
1340 if (Size == 64 || Size == 128)
1341 return false;
1342
1343 return true;
1344 }
1345
1346 // If this is a builtin, pointer, enum, complex type, member pointer, or
1347 // member function pointer it is ok.
1348 if (Ty->getAs<BuiltinType>() || Ty->hasPointerRepresentation() ||
1349 Ty->isAnyComplexType() || Ty->isEnumeralType() ||
1350 Ty->isBlockPointerType() || Ty->isMemberPointerType())
1351 return true;
1352
1353 // Arrays are treated like records.
1354 if (const ConstantArrayType *AT = Context.getAsConstantArrayType(Ty))
1355 return shouldReturnTypeInRegister(AT->getElementType(), Context);
1356
1357 // Otherwise, it must be a record type.
1358 const RecordType *RT = Ty->getAs<RecordType>();
1359 if (!RT) return false;
1360
1361 // FIXME: Traverse bases here too.
1362
1363 // Structure types are passed in register if all fields would be
1364 // passed in a register.
1365 for (const auto *FD : RT->getDecl()->fields()) {
1366 // Empty fields are ignored.
1367 if (isEmptyField(Context, FD, true))
1368 continue;
1369
1370 // Check fields recursively.
1371 if (!shouldReturnTypeInRegister(FD->getType(), Context))
1372 return false;
1373 }
1374 return true;
1375 }
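// Illustrative examples (hypothetical types, assuming the i386 Darwin ABI):
//   struct P { short a, b; };  // 32 bits, register sized -> returned in %eax
//   struct Q { char c[3]; };   // 24 bits, not a register size -> memory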
1376
1377 static bool is32Or64BitBasicType(QualType Ty, ASTContext &Context) {
1378 // Treat complex types as the element type.
1379 if (const ComplexType *CTy = Ty->getAs<ComplexType>())
1380 Ty = CTy->getElementType();
1381
1382 // Check for a type which we know has a simple scalar argument-passing
1383 // convention without any padding. (We're specifically looking for 32
1384 // and 64-bit integer and integer-equivalents, float, and double.)
1385 if (!Ty->getAs<BuiltinType>() && !Ty->hasPointerRepresentation() &&
1386 !Ty->isEnumeralType() && !Ty->isBlockPointerType())
1387 return false;
1388
1389 uint64_t Size = Context.getTypeSize(Ty);
1390 return Size == 32 || Size == 64;
1391 }
1392
1393 static bool addFieldSizes(ASTContext &Context, const RecordDecl *RD,
1394 uint64_t &Size) {
1395 for (const auto *FD : RD->fields()) {
1396 // Scalar arguments on the stack get 4 byte alignment on x86. If the
1397 // argument is smaller than 32-bits, expanding the struct will create
1398 // alignment padding.
1399 if (!is32Or64BitBasicType(FD->getType(), Context))
1400 return false;
1401
1402 // FIXME: Reject bit-fields wholesale; there are two problems: we don't know
1403 // how to expand them yet, and the predicate for telling if a bit-field still
1404 // counts as "basic" is more complicated than what we were doing previously.
1405 if (FD->isBitField())
1406 return false;
1407
1408 Size += Context.getTypeSize(FD->getType());
1409 }
1410 return true;
1411 }
1412
1413 static bool addBaseAndFieldSizes(ASTContext &Context, const CXXRecordDecl *RD,
1414 uint64_t &Size) {
1415 // Don't do this if there are any non-empty bases.
1416 for (const CXXBaseSpecifier &Base : RD->bases()) {
1417 if (!addBaseAndFieldSizes(Context, Base.getType()->getAsCXXRecordDecl(),
1418 Size))
1419 return false;
1420 }
1421 if (!addFieldSizes(Context, RD, Size))
1422 return false;
1423 return true;
1424 }
1425
1426 /// Test whether an argument type which is to be passed indirectly (on the
1427 /// stack) would have the equivalent layout if it was expanded into separate
1428 /// arguments. If so, we prefer to do the latter to avoid inhibiting
1429 /// optimizations.
1430 bool X86_32ABIInfo::canExpandIndirectArgument(QualType Ty) const {
1431 // We can only expand structure types.
1432 const RecordType *RT = Ty->getAs<RecordType>();
1433 if (!RT)
1434 return false;
1435 const RecordDecl *RD = RT->getDecl();
1436 uint64_t Size = 0;
1437 if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD)) {
1438 if (!IsWin32StructABI) {
1439 // On non-Windows, we have to conservatively match our old bitcode
1440 // prototypes in order to be ABI-compatible at the bitcode level.
1441 if (!CXXRD->isCLike())
1442 return false;
1443 } else {
1444 // Don't do this for dynamic classes.
1445 if (CXXRD->isDynamicClass())
1446 return false;
1447 }
1448 if (!addBaseAndFieldSizes(getContext(), CXXRD, Size))
1449 return false;
1450 } else {
1451 if (!addFieldSizes(getContext(), RD, Size))
1452 return false;
1453 }
1454
1455 // We can do this if there was no alignment padding.
1456 return Size == getContext().getTypeSize(Ty);
1457 }
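// Illustrative examples (hypothetical types): `struct S { int a; float b; }`
// sums to 64 bits with no padding, so it can be expanded into two scalar
// stack arguments. On ABIs where long long is 8-byte aligned (e.g. Win32),
// `struct T { int a; long long b; }` has a 128-bit type size but only 96 bits
// of fields, so the padding check above rejects the expansion.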
1458
1459 ABIArgInfo X86_32ABIInfo::getIndirectReturnResult(QualType RetTy, CCState &State) const {
1460 // If the return value is indirect, then the hidden argument is consuming one
1461 // integer register.
1462 if (State.FreeRegs) {
1463 --State.FreeRegs;
1464 if (!IsMCUABI)
1465 return getNaturalAlignIndirectInReg(RetTy);
1466 }
1467 return getNaturalAlignIndirect(RetTy, /*ByVal=*/false);
1468 }
1469
1470 ABIArgInfo X86_32ABIInfo::classifyReturnType(QualType RetTy,
1471 CCState &State) const {
1472 if (RetTy->isVoidType())
1473 return ABIArgInfo::getIgnore();
1474
1475 const Type *Base = nullptr;
1476 uint64_t NumElts = 0;
1477 if ((State.CC == llvm::CallingConv::X86_VectorCall ||
1478 State.CC == llvm::CallingConv::X86_RegCall) &&
1479 isHomogeneousAggregate(RetTy, Base, NumElts)) {
1480 // The LLVM struct type for such an aggregate should lower properly.
1481 return ABIArgInfo::getDirect();
1482 }
1483
1484 if (const VectorType *VT = RetTy->getAs<VectorType>()) {
1485 // On Darwin, some vectors are returned in registers.
1486 if (IsDarwinVectorABI) {
1487 uint64_t Size = getContext().getTypeSize(RetTy);
1488
1489 // 128-bit vectors are a special case; they are returned in
1490 // registers and we need to make sure to pick a type the LLVM
1491 // backend will like.
1492 if (Size == 128)
1493 return ABIArgInfo::getDirect(llvm::FixedVectorType::get(
1494 llvm::Type::getInt64Ty(getVMContext()), 2));
1495
1496 // Always return in register if it fits in a general purpose
1497 // register, or if it is 64 bits and has a single element.
1498 if ((Size == 8 || Size == 16 || Size == 32) ||
1499 (Size == 64 && VT->getNumElements() == 1))
1500 return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(),
1501 Size));
1502
1503 return getIndirectReturnResult(RetTy, State);
1504 }
1505
1506 return ABIArgInfo::getDirect();
1507 }
1508
1509 if (isAggregateTypeForABI(RetTy)) {
1510 if (const RecordType *RT = RetTy->getAs<RecordType>()) {
1511 // Structures with flexible arrays are always indirect.
1512 if (RT->getDecl()->hasFlexibleArrayMember())
1513 return getIndirectReturnResult(RetTy, State);
1514 }
1515
1516 // If specified, structs and unions are always indirect.
1517 if (!IsRetSmallStructInRegABI && !RetTy->isAnyComplexType())
1518 return getIndirectReturnResult(RetTy, State);
1519
1520 // Ignore empty structs/unions.
1521 if (isEmptyRecord(getContext(), RetTy, true))
1522 return ABIArgInfo::getIgnore();
1523
1524 // Small structures which are register sized are generally returned
1525 // in a register.
1526 if (shouldReturnTypeInRegister(RetTy, getContext())) {
1527 uint64_t Size = getContext().getTypeSize(RetTy);
1528
1529 // As a special-case, if the struct is a "single-element" struct, and
1530 // the field is of type "float" or "double", return it in a
1531 // floating-point register. (MSVC does not apply this special case.)
1532 // We apply a similar transformation for pointer types to improve the
1533 // quality of the generated IR.
1534 if (const Type *SeltTy = isSingleElementStruct(RetTy, getContext()))
1535 if ((!IsWin32StructABI && SeltTy->isRealFloatingType())
1536 || SeltTy->hasPointerRepresentation())
1537 return ABIArgInfo::getDirect(CGT.ConvertType(QualType(SeltTy, 0)));
1538
1539 // FIXME: We should be able to narrow this integer in cases with dead
1540 // padding.
1541 return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(), Size));
1542 }
1543
1544 return getIndirectReturnResult(RetTy, State);
1545 }
1546
1547 // Treat an enum type as its underlying type.
1548 if (const EnumType *EnumTy = RetTy->getAs<EnumType>())
1549 RetTy = EnumTy->getDecl()->getIntegerType();
1550
1551 if (const auto *EIT = RetTy->getAs<ExtIntType>())
1552 if (EIT->getNumBits() > 64)
1553 return getIndirectReturnResult(RetTy, State);
1554
1555 return (isPromotableIntegerTypeForABI(RetTy) ? ABIArgInfo::getExtend(RetTy)
1556 : ABIArgInfo::getDirect());
1557 }
1558
1559 static bool isSIMDVectorType(ASTContext &Context, QualType Ty) {
1560 return Ty->getAs<VectorType>() && Context.getTypeSize(Ty) == 128;
1561 }
1562
1563 static bool isRecordWithSIMDVectorType(ASTContext &Context, QualType Ty) {
1564 const RecordType *RT = Ty->getAs<RecordType>();
1565 if (!RT)
1566 return false;
1567 const RecordDecl *RD = RT->getDecl();
1568
1569 // If this is a C++ record, check the bases first.
1570 if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD))
1571 for (const auto &I : CXXRD->bases())
1572 if (!isRecordWithSIMDVectorType(Context, I.getType()))
1573 return false;
1574
1575 for (const auto *i : RD->fields()) {
1576 QualType FT = i->getType();
1577
1578 if (isSIMDVectorType(Context, FT))
1579 return true;
1580
1581 if (isRecordWithSIMDVectorType(Context, FT))
1582 return true;
1583 }
1584
1585 return false;
1586 }
1587
1588 unsigned X86_32ABIInfo::getTypeStackAlignInBytes(QualType Ty,
1589 unsigned Align) const {
1590 // Otherwise, if the alignment is less than or equal to the minimum ABI
1591 // alignment, just use the default; the backend will handle this.
1592 if (Align <= MinABIStackAlignInBytes)
1593 return 0; // Use default alignment.
1594
1595 // On non-Darwin, the stack type alignment is always 4.
1596 if (!IsDarwinVectorABI) {
1597 // Set explicit alignment, since we may need to realign the top.
1598 return MinABIStackAlignInBytes;
1599 }
1600
1601 // Otherwise, if the type contains an SSE vector type, the alignment is 16.
1602 if (Align >= 16 && (isSIMDVectorType(getContext(), Ty) ||
1603 isRecordWithSIMDVectorType(getContext(), Ty)))
1604 return 16;
1605
1606 return MinABIStackAlignInBytes;
1607 }
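// Illustrative example (hypothetical type, assuming i386 Darwin): a struct
// containing an __m128 member has a 16-byte type alignment and contains a
// SIMD vector, so it gets an explicit 16-byte stack alignment; a struct of
// doubles (8-byte alignment) falls through to MinABIStackAlignInBytes.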
1608
1609 ABIArgInfo X86_32ABIInfo::getIndirectResult(QualType Ty, bool ByVal,
1610 CCState &State) const {
1611 if (!ByVal) {
1612 if (State.FreeRegs) {
1613 --State.FreeRegs; // Non-byval indirects just use one pointer.
1614 if (!IsMCUABI)
1615 return getNaturalAlignIndirectInReg(Ty);
1616 }
1617 return getNaturalAlignIndirect(Ty, false);
1618 }
1619
1620 // Compute the byval alignment.
1621 unsigned TypeAlign = getContext().getTypeAlign(Ty) / 8;
1622 unsigned StackAlign = getTypeStackAlignInBytes(Ty, TypeAlign);
1623 if (StackAlign == 0)
1624 return ABIArgInfo::getIndirect(CharUnits::fromQuantity(4), /*ByVal=*/true);
1625
1626 // If the stack alignment is less than the type alignment, realign the
1627 // argument.
1628 bool Realign = TypeAlign > StackAlign;
1629 return ABIArgInfo::getIndirect(CharUnits::fromQuantity(StackAlign),
1630 /*ByVal=*/true, Realign);
1631 }
1632
1633 X86_32ABIInfo::Class X86_32ABIInfo::classify(QualType Ty) const {
1634 const Type *T = isSingleElementStruct(Ty, getContext());
1635 if (!T)
1636 T = Ty.getTypePtr();
1637
1638 if (const BuiltinType *BT = T->getAs<BuiltinType>()) {
1639 BuiltinType::Kind K = BT->getKind();
1640 if (K == BuiltinType::Float || K == BuiltinType::Double)
1641 return Float;
1642 }
1643 return Integer;
1644 }
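// Illustrative examples: `float`, `double`, and single-element structs such
// as `struct F { double d; };` classify as Float, so updateFreeRegs below
// leaves the integer registers alone for them; ints, pointers, and all other
// types classify as Integer.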
1645
1646 bool X86_32ABIInfo::updateFreeRegs(QualType Ty, CCState &State) const {
1647 if (!IsSoftFloatABI) {
1648 Class C = classify(Ty);
1649 if (C == Float)
1650 return false;
1651 }
1652
1653 unsigned Size = getContext().getTypeSize(Ty);
1654 unsigned SizeInRegs = (Size + 31) / 32;
1655
1656 if (SizeInRegs == 0)
1657 return false;
1658
1659 if (!IsMCUABI) {
1660 if (SizeInRegs > State.FreeRegs) {
1661 State.FreeRegs = 0;
1662 return false;
1663 }
1664 } else {
1665 // The MCU psABI allows passing parameters in-reg even if there are
1666 // earlier parameters that are passed on the stack. Also,
1667 // it does not allow passing >8-byte structs in-register,
1668 // even if there are 3 free registers available.
1669 if (SizeInRegs > State.FreeRegs || SizeInRegs > 2)
1670 return false;
1671 }
1672
1673 State.FreeRegs -= SizeInRegs;
1674 return true;
1675 }
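// Illustrative walk-through (hypothetical signature): with regparm(3) and
// arguments (int, long long, int), the int takes one register (2 left), the
// long long takes two (0 left), and the trailing int fails the check above
// and is passed on the stack.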
1676
1677 bool X86_32ABIInfo::shouldAggregateUseDirect(QualType Ty, CCState &State,
1678 bool &InReg,
1679 bool &NeedsPadding) const {
1680 // On Windows, aggregates other than HFAs are never passed in registers, and
1681 // they do not consume register slots. Homogeneous floating-point aggregates
1682 // (HFAs) have already been dealt with at this point.
1683 if (IsWin32StructABI && isAggregateTypeForABI(Ty))
1684 return false;
1685
1686 NeedsPadding = false;
1687 InReg = !IsMCUABI;
1688
1689 if (!updateFreeRegs(Ty, State))
1690 return false;
1691
1692 if (IsMCUABI)
1693 return true;
1694
1695 if (State.CC == llvm::CallingConv::X86_FastCall ||
1696 State.CC == llvm::CallingConv::X86_VectorCall ||
1697 State.CC == llvm::CallingConv::X86_RegCall) {
1698 if (getContext().getTypeSize(Ty) <= 32 && State.FreeRegs)
1699 NeedsPadding = true;
1700
1701 return false;
1702 }
1703
1704 return true;
1705 }
1706
1707 bool X86_32ABIInfo::shouldPrimitiveUseInReg(QualType Ty, CCState &State) const {
1708 if (!updateFreeRegs(Ty, State))
1709 return false;
1710
1711 if (IsMCUABI)
1712 return false;
1713
1714 if (State.CC == llvm::CallingConv::X86_FastCall ||
1715 State.CC == llvm::CallingConv::X86_VectorCall ||
1716 State.CC == llvm::CallingConv::X86_RegCall) {
1717 if (getContext().getTypeSize(Ty) > 32)
1718 return false;
1719
1720 return (Ty->isIntegralOrEnumerationType() || Ty->isPointerType() ||
1721 Ty->isReferenceType());
1722 }
1723
1724 return true;
1725 }
1726
1727 void X86_32ABIInfo::runVectorCallFirstPass(CGFunctionInfo &FI, CCState &State) const {
1728 // Vectorcall on x86 works subtly differently than on x64, so the format is
1729 // a bit different from the x64 version. First, all vector types (not HVAs)
1730 // are assigned, with the first 6 ending up in the [XYZ]MM0-5 registers.
1731 // This differs from the x64 implementation, where the first 6 arguments by
1732 // position get registers, whatever their type.
1733 // In the second pass over the arguments, HVAs are passed in the remaining
1734 // vector registers if possible, or indirectly by address. The address will be
1735 // passed in ECX/EDX if available. Any other arguments are passed according to
1736 // the usual fastcall rules.
1737 MutableArrayRef<CGFunctionInfoArgInfo> Args = FI.arguments();
1738 for (int I = 0, E = Args.size(); I < E; ++I) {
1739 const Type *Base = nullptr;
1740 uint64_t NumElts = 0;
1741 const QualType &Ty = Args[I].type;
1742 if ((Ty->isVectorType() || Ty->isBuiltinType()) &&
1743 isHomogeneousAggregate(Ty, Base, NumElts)) {
1744 if (State.FreeSSERegs >= NumElts) {
1745 State.FreeSSERegs -= NumElts;
1746 Args[I].info = ABIArgInfo::getDirectInReg();
1747 State.IsPreassigned.set(I);
1748 }
1749 }
1750 }
1751 }
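// Illustrative example (HVA4 is a hypothetical 4-element homogeneous float
// aggregate): for f(__m128 a, HVA4 b, __m128 c), this first pass assigns a
// and c to XMM registers and marks them preassigned; b is a record type, so
// it is left for the second pass, which gives it the remaining vector
// registers or passes it indirectly.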
1752
1753 ABIArgInfo X86_32ABIInfo::classifyArgumentType(QualType Ty,
1754 CCState &State) const {
1755 // FIXME: Set alignment on indirect arguments.
1756 bool IsFastCall = State.CC == llvm::CallingConv::X86_FastCall;
1757 bool IsRegCall = State.CC == llvm::CallingConv::X86_RegCall;
1758 bool IsVectorCall = State.CC == llvm::CallingConv::X86_VectorCall;
1759
1760 Ty = useFirstFieldIfTransparentUnion(Ty);
1761 TypeInfo TI = getContext().getTypeInfo(Ty);
1762
1763 // Check with the C++ ABI first.
1764 const RecordType *RT = Ty->getAs<RecordType>();
1765 if (RT) {
1766 CGCXXABI::RecordArgABI RAA = getRecordArgABI(RT, getCXXABI());
1767 if (RAA == CGCXXABI::RAA_Indirect) {
1768 return getIndirectResult(Ty, false, State);
1769 } else if (RAA == CGCXXABI::RAA_DirectInMemory) {
1770 // The field index doesn't matter, we'll fix it up later.
1771 return ABIArgInfo::getInAlloca(/*FieldIndex=*/0);
1772 }
1773 }
1774
1775 // Regcall uses the concept of a homogeneous vector aggregate, similar
1776 // to other targets.
1777 const Type *Base = nullptr;
1778 uint64_t NumElts = 0;
1779 if ((IsRegCall || IsVectorCall) &&
1780 isHomogeneousAggregate(Ty, Base, NumElts)) {
1781 if (State.FreeSSERegs >= NumElts) {
1782 State.FreeSSERegs -= NumElts;
1783
1784 // Vectorcall passes HVAs directly and does not flatten them, but regcall
1785 // does.
1786 if (IsVectorCall)
1787 return getDirectX86Hva();
1788
1789 if (Ty->isBuiltinType() || Ty->isVectorType())
1790 return ABIArgInfo::getDirect();
1791 return ABIArgInfo::getExpand();
1792 }
1793 return getIndirectResult(Ty, /*ByVal=*/false, State);
1794 }
1795
1796 if (isAggregateTypeForABI(Ty)) {
1797 // Structures with flexible arrays are always indirect.
1798 // FIXME: This should not be byval!
1799 if (RT && RT->getDecl()->hasFlexibleArrayMember())
1800 return getIndirectResult(Ty, true, State);
1801
1802 // Ignore empty structs/unions on non-Windows.
1803 if (!IsWin32StructABI && isEmptyRecord(getContext(), Ty, true))
1804 return ABIArgInfo::getIgnore();
1805
1806 llvm::LLVMContext &LLVMContext = getVMContext();
1807 llvm::IntegerType *Int32 = llvm::Type::getInt32Ty(LLVMContext);
1808 bool NeedsPadding = false;
1809 bool InReg;
1810 if (shouldAggregateUseDirect(Ty, State, InReg, NeedsPadding)) {
1811 unsigned SizeInRegs = (TI.Width + 31) / 32;
1812 SmallVector<llvm::Type*, 3> Elements(SizeInRegs, Int32);
1813 llvm::Type *Result = llvm::StructType::get(LLVMContext, Elements);
1814 if (InReg)
1815 return ABIArgInfo::getDirectInReg(Result);
1816 else
1817 return ABIArgInfo::getDirect(Result);
1818 }
1819 llvm::IntegerType *PaddingType = NeedsPadding ? Int32 : nullptr;
1820
1821 // Pass over-aligned aggregates on Windows indirectly. This behavior was
1822 // added in MSVC 2015.
1823 if (IsWin32StructABI && TI.AlignIsRequired && TI.Align > 32)
1824 return getIndirectResult(Ty, /*ByVal=*/false, State);
1825
1826 // Expand small (<= 128-bit) record types when we know that the stack layout
1827 // of those arguments will match the struct. This is important because the
1828 // LLVM backend isn't smart enough to remove byval, which inhibits many
1829 // optimizations.
1830 // Don't do this for the MCU if there are still free integer registers
1831 // (see X86_64 ABI for full explanation).
1832 if (TI.Width <= 4 * 32 && (!IsMCUABI || State.FreeRegs == 0) &&
1833 canExpandIndirectArgument(Ty))
1834 return ABIArgInfo::getExpandWithPadding(
1835 IsFastCall || IsVectorCall || IsRegCall, PaddingType);
1836
1837 return getIndirectResult(Ty, true, State);
1838 }
1839
1840 if (const VectorType *VT = Ty->getAs<VectorType>()) {
1841 // On Windows, vectors are passed directly if registers are available, or
1842 // indirectly if not. This avoids the need to align argument memory. Pass
1843 // user-defined vector types larger than 512 bits indirectly for simplicity.
1844 if (IsWin32StructABI) {
1845 if (TI.Width <= 512 && State.FreeSSERegs > 0) {
1846 --State.FreeSSERegs;
1847 return ABIArgInfo::getDirectInReg();
1848 }
1849 return getIndirectResult(Ty, /*ByVal=*/false, State);
1850 }
1851
1852 // On Darwin, some vectors are passed in memory, we handle this by passing
1853 // it as an i8/i16/i32/i64.
1854 if (IsDarwinVectorABI) {
1855 if ((TI.Width == 8 || TI.Width == 16 || TI.Width == 32) ||
1856 (TI.Width == 64 && VT->getNumElements() == 1))
1857 return ABIArgInfo::getDirect(
1858 llvm::IntegerType::get(getVMContext(), TI.Width));
1859 }
1860
1861 if (IsX86_MMXType(CGT.ConvertType(Ty)))
1862 return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(), 64));
1863
1864 return ABIArgInfo::getDirect();
1865 }
1866
1867
1868 if (const EnumType *EnumTy = Ty->getAs<EnumType>())
1869 Ty = EnumTy->getDecl()->getIntegerType();
1870
1871 bool InReg = shouldPrimitiveUseInReg(Ty, State);
1872
1873 if (isPromotableIntegerTypeForABI(Ty)) {
1874 if (InReg)
1875 return ABIArgInfo::getExtendInReg(Ty);
1876 return ABIArgInfo::getExtend(Ty);
1877 }
1878
1879 if (const auto * EIT = Ty->getAs<ExtIntType>()) {
1880 if (EIT->getNumBits() <= 64) {
1881 if (InReg)
1882 return ABIArgInfo::getDirectInReg();
1883 return ABIArgInfo::getDirect();
1884 }
1885 return getIndirectResult(Ty, /*ByVal=*/false, State);
1886 }
1887
1888 if (InReg)
1889 return ABIArgInfo::getDirectInReg();
1890 return ABIArgInfo::getDirect();
1891 }
1892
1893 void X86_32ABIInfo::computeInfo(CGFunctionInfo &FI) const {
1894 CCState State(FI);
1895 if (IsMCUABI)
1896 State.FreeRegs = 3;
1897 else if (State.CC == llvm::CallingConv::X86_FastCall) {
1898 State.FreeRegs = 2;
1899 State.FreeSSERegs = 3;
1900 } else if (State.CC == llvm::CallingConv::X86_VectorCall) {
1901 State.FreeRegs = 2;
1902 State.FreeSSERegs = 6;
1903 } else if (FI.getHasRegParm())
1904 State.FreeRegs = FI.getRegParm();
1905 else if (State.CC == llvm::CallingConv::X86_RegCall) {
1906 State.FreeRegs = 5;
1907 State.FreeSSERegs = 8;
1908 } else if (IsWin32StructABI) {
1909 // Since MSVC 2015, the first three SSE vectors have been passed in
1910 // registers. The rest are passed indirectly.
1911 State.FreeRegs = DefaultNumRegisterParameters;
1912 State.FreeSSERegs = 3;
1913 } else
1914 State.FreeRegs = DefaultNumRegisterParameters;
1915
1916 if (!::classifyReturnType(getCXXABI(), FI, *this)) {
1917 FI.getReturnInfo() = classifyReturnType(FI.getReturnType(), State);
1918 } else if (FI.getReturnInfo().isIndirect()) {
1919 // The C++ ABI is not aware of register usage, so we have to check if the
1920 // return value was sret and put it in a register ourselves if appropriate.
1921 if (State.FreeRegs) {
1922 --State.FreeRegs; // The sret parameter consumes a register.
1923 if (!IsMCUABI)
1924 FI.getReturnInfo().setInReg(true);
1925 }
1926 }
1927
1928 // The chain argument effectively gives us another free register.
1929 if (FI.isChainCall())
1930 ++State.FreeRegs;
1931
1932 // For vectorcall, do a first pass over the arguments, assigning FP and vector
1933 // arguments to XMM registers as available.
1934 if (State.CC == llvm::CallingConv::X86_VectorCall)
1935 runVectorCallFirstPass(FI, State);
1936
1937 bool UsedInAlloca = false;
1938 MutableArrayRef<CGFunctionInfoArgInfo> Args = FI.arguments();
1939 for (int I = 0, E = Args.size(); I < E; ++I) {
1940 // Skip arguments that have already been assigned.
1941 if (State.IsPreassigned.test(I))
1942 continue;
1943
1944 Args[I].info = classifyArgumentType(Args[I].type, State);
1945 UsedInAlloca |= (Args[I].info.getKind() == ABIArgInfo::InAlloca);
1946 }
1947
1948 // If we needed to use inalloca for any argument, do a second pass and rewrite
1949 // all the memory arguments to use inalloca.
1950 if (UsedInAlloca)
1951 rewriteWithInAlloca(FI);
1952 }
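// Rough summary of the initial register budgets set above (illustrative):
//   MCU:        3 integer regs
//   fastcall:   2 integer (ECX, EDX) + 3 SSE
//   vectorcall: 2 integer + 6 SSE
//   regcall:    5 integer + 8 SSE
//   Win32:      DefaultNumRegisterParameters integer + 3 SSE (MSVC 2015+)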
1953
1954 void
1955 X86_32ABIInfo::addFieldToArgStruct(SmallVector<llvm::Type *, 6> &FrameFields,
1956 CharUnits &StackOffset, ABIArgInfo &Info,
1957 QualType Type) const {
1958 // Arguments are always 4-byte-aligned.
1959 CharUnits WordSize = CharUnits::fromQuantity(4);
1960 assert(StackOffset.isMultipleOf(WordSize) && "unaligned inalloca struct");
1961
1962 // sret pointers and indirect things will require an extra pointer
1963 // indirection, unless they are byval. Most things are byval, and will not
1964 // require this indirection.
1965 bool IsIndirect = false;
1966 if (Info.isIndirect() && !Info.getIndirectByVal())
1967 IsIndirect = true;
1968 Info = ABIArgInfo::getInAlloca(FrameFields.size(), IsIndirect);
1969 llvm::Type *LLTy = CGT.ConvertTypeForMem(Type);
1970 if (IsIndirect)
1971 LLTy = LLTy->getPointerTo(0);
1972 FrameFields.push_back(LLTy);
1973 StackOffset += IsIndirect ? WordSize : getContext().getTypeSizeInChars(Type);
1974
1975 // Insert padding bytes to respect alignment.
1976 CharUnits FieldEnd = StackOffset;
1977 StackOffset = FieldEnd.alignTo(WordSize);
1978 if (StackOffset != FieldEnd) {
1979 CharUnits NumBytes = StackOffset - FieldEnd;
1980 llvm::Type *Ty = llvm::Type::getInt8Ty(getVMContext());
1981 Ty = llvm::ArrayType::get(Ty, NumBytes.getQuantity());
1982 FrameFields.push_back(Ty);
1983 }
1984 }
1985
1986 static bool isArgInAlloca(const ABIArgInfo &Info) {
1987 // Leave ignored and inreg arguments alone.
1988 switch (Info.getKind()) {
1989 case ABIArgInfo::InAlloca:
1990 return true;
1991 case ABIArgInfo::Ignore:
1992 case ABIArgInfo::IndirectAliased:
1993 return false;
1994 case ABIArgInfo::Indirect:
1995 case ABIArgInfo::Direct:
1996 case ABIArgInfo::Extend:
1997 return !Info.getInReg();
1998 case ABIArgInfo::Expand:
1999 case ABIArgInfo::CoerceAndExpand:
2000 // These are aggregate types which are never passed in registers when
2001 // inalloca is involved.
2002 return true;
2003 }
2004 llvm_unreachable("invalid enum");
2005 }
2006
2007 void X86_32ABIInfo::rewriteWithInAlloca(CGFunctionInfo &FI) const {
2008 assert(IsWin32StructABI && "inalloca only supported on win32");
2009
2010 // Build a packed struct type for all of the arguments in memory.
2011 SmallVector<llvm::Type *, 6> FrameFields;
2012
2013 // The stack alignment is always 4.
2014 CharUnits StackAlign = CharUnits::fromQuantity(4);
2015
2016 CharUnits StackOffset;
2017 CGFunctionInfo::arg_iterator I = FI.arg_begin(), E = FI.arg_end();
2018
2019 // Put 'this' into the struct before 'sret', if necessary.
2020 bool IsThisCall =
2021 FI.getCallingConvention() == llvm::CallingConv::X86_ThisCall;
2022 ABIArgInfo &Ret = FI.getReturnInfo();
2023 if (Ret.isIndirect() && Ret.isSRetAfterThis() && !IsThisCall &&
2024 isArgInAlloca(I->info)) {
2025 addFieldToArgStruct(FrameFields, StackOffset, I->info, I->type);
2026 ++I;
2027 }
2028
2029 // Put the sret parameter into the inalloca struct if it's in memory.
2030 if (Ret.isIndirect() && !Ret.getInReg()) {
2031 addFieldToArgStruct(FrameFields, StackOffset, Ret, FI.getReturnType());
2032 // On Windows, the hidden sret parameter is always returned in eax.
2033 Ret.setInAllocaSRet(IsWin32StructABI);
2034 }
2035
2036 // Skip the 'this' parameter in ecx.
2037 if (IsThisCall)
2038 ++I;
2039
2040 // Put arguments passed in memory into the struct.
2041 for (; I != E; ++I) {
2042 if (isArgInAlloca(I->info))
2043 addFieldToArgStruct(FrameFields, StackOffset, I->info, I->type);
2044 }
2045
2046 FI.setArgStruct(llvm::StructType::get(getVMContext(), FrameFields,
2047 /*isPacked=*/true),
2048 StackAlign);
2049 }
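// Illustrative layout (hypothetical types): for a thiscall method
// void S::f(Big b, int i) where Big has a constructor (so it is passed
// DirectInMemory), 'this' stays in ECX and the call site materializes a
// packed inalloca frame roughly like <{ %struct.Big, i32 }> with 4-byte
// stack alignment.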
2050
2051 Address X86_32ABIInfo::EmitVAArg(CodeGenFunction &CGF,
2052 Address VAListAddr, QualType Ty) const {
2053
2054 auto TypeInfo = getContext().getTypeInfoInChars(Ty);
2055
2056 // x86-32 changes the alignment of certain arguments on the stack.
2057 //
2058 // Just messing with TypeInfo like this works because we never pass
2059 // anything indirectly.
2060 TypeInfo.Align = CharUnits::fromQuantity(
2061 getTypeStackAlignInBytes(Ty, TypeInfo.Align.getQuantity()));
2062
2063 return emitVoidPtrVAArg(CGF, VAListAddr, Ty, /*Indirect*/ false,
2064 TypeInfo, CharUnits::fromQuantity(4),
2065 /*AllowHigherAlign*/ true);
2066 }
2067
2068 bool X86_32TargetCodeGenInfo::isStructReturnInRegABI(
2069 const llvm::Triple &Triple, const CodeGenOptions &Opts) {
2070 assert(Triple.getArch() == llvm::Triple::x86);
2071
2072 switch (Opts.getStructReturnConvention()) {
2073 case CodeGenOptions::SRCK_Default:
2074 break;
2075 case CodeGenOptions::SRCK_OnStack: // -fpcc-struct-return
2076 return false;
2077 case CodeGenOptions::SRCK_InRegs: // -freg-struct-return
2078 return true;
2079 }
2080
2081 if (Triple.isOSDarwin() || Triple.isOSIAMCU())
2082 return true;
2083
2084 switch (Triple.getOS()) {
2085 case llvm::Triple::DragonFly:
2086 case llvm::Triple::FreeBSD:
2087 case llvm::Triple::OpenBSD:
2088 case llvm::Triple::Win32:
2089 return true;
2090 default:
2091 return false;
2092 }
2093 }
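// Illustrative defaults (from the switch above): absent -fpcc-struct-return
// or -freg-struct-return, Darwin, IAMCU, DragonFly, FreeBSD, OpenBSD, and
// Win32 return small structs in registers; other targets (e.g. Linux) use
// the stack.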
2094
2095 static void addX86InterruptAttrs(const FunctionDecl *FD, llvm::GlobalValue *GV,
2096 CodeGen::CodeGenModule &CGM) {
2097 if (!FD->hasAttr<AnyX86InterruptAttr>())
2098 return;
2099
2100 llvm::Function *Fn = cast<llvm::Function>(GV);
2101 Fn->setCallingConv(llvm::CallingConv::X86_INTR);
2102 if (FD->getNumParams() == 0)
2103 return;
2104
2105 auto PtrTy = cast<PointerType>(FD->getParamDecl(0)->getType());
2106 llvm::Type *ByValTy = CGM.getTypes().ConvertType(PtrTy->getPointeeType());
2107 llvm::Attribute NewAttr = llvm::Attribute::getWithByValType(
2108 Fn->getContext(), ByValTy);
2109 Fn->addParamAttr(0, NewAttr);
2110 }
2111
2112 void X86_32TargetCodeGenInfo::setTargetAttributes(
2113 const Decl *D, llvm::GlobalValue *GV, CodeGen::CodeGenModule &CGM) const {
2114 if (GV->isDeclaration())
2115 return;
2116 if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D)) {
2117 if (FD->hasAttr<X86ForceAlignArgPointerAttr>()) {
2118 llvm::Function *Fn = cast<llvm::Function>(GV);
2119 Fn->addFnAttr("stackrealign");
2120 }
2121
2122 addX86InterruptAttrs(FD, GV, CGM);
2123 }
2124 }
2125
2126 bool X86_32TargetCodeGenInfo::initDwarfEHRegSizeTable(
2127 CodeGen::CodeGenFunction &CGF,
2128 llvm::Value *Address) const {
2129 CodeGen::CGBuilderTy &Builder = CGF.Builder;
2130
2131 llvm::Value *Four8 = llvm::ConstantInt::get(CGF.Int8Ty, 4);
2132
2133 // 0-7 are the eight integer registers; the order is different
2134 // on Darwin (for EH), but the range is the same.
2135 // 8 is %eip.
2136 AssignToArrayRange(Builder, Address, Four8, 0, 8);
2137
2138 if (CGF.CGM.getTarget().getTriple().isOSDarwin()) {
2139 // 12-16 are st(0..4). Not sure why we stop at 4.
2140 // These have size 16, which is sizeof(long double) on
2141 // platforms with 8-byte alignment for that type.
2142 llvm::Value *Sixteen8 = llvm::ConstantInt::get(CGF.Int8Ty, 16);
2143 AssignToArrayRange(Builder, Address, Sixteen8, 12, 16);
2144
2145 } else {
2146 // 9 is %eflags, which doesn't get a size on Darwin for some
2147 // reason.
2148 Builder.CreateAlignedStore(
2149 Four8, Builder.CreateConstInBoundsGEP1_32(CGF.Int8Ty, Address, 9),
2150 CharUnits::One());
2151
2152 // 11-16 are st(0..5). Not sure why we stop at 5.
2153 // These have size 12, which is sizeof(long double) on
2154 // platforms with 4-byte alignment for that type.
2155 llvm::Value *Twelve8 = llvm::ConstantInt::get(CGF.Int8Ty, 12);
2156 AssignToArrayRange(Builder, Address, Twelve8, 11, 16);
2157 }
2158
2159 return false;
2160 }
2161
2162 //===----------------------------------------------------------------------===//
2163 // X86-64 ABI Implementation
2164 //===----------------------------------------------------------------------===//
2165
2166
2167 namespace {
2168 /// The AVX ABI level for X86 targets.
2169 enum class X86AVXABILevel {
2170 None,
2171 AVX,
2172 AVX512
2173 };
2174
2175 /// \returns the size in bits of the largest (native) vector for \p AVXLevel.
2176 static unsigned getNativeVectorSizeForAVXABI(X86AVXABILevel AVXLevel) {
2177 switch (AVXLevel) {
2178 case X86AVXABILevel::AVX512:
2179 return 512;
2180 case X86AVXABILevel::AVX:
2181 return 256;
2182 case X86AVXABILevel::None:
2183 return 128;
2184 }
2185 llvm_unreachable("Unknown AVXLevel");
2186 }
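// Illustrative usage: getNativeVectorSizeForAVXABI(X86AVXABILevel::AVX)
// yields 256, so a 256-bit vector argument fits in a single native YMM
// register while a 512-bit one must be split or passed in memory.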
2187
2188 /// X86_64ABIInfo - The X86_64 ABI information.
2189 class X86_64ABIInfo : public SwiftABIInfo {
2190 enum Class {
2191 Integer = 0,
2192 SSE,
2193 SSEUp,
2194 X87,
2195 X87Up,
2196 ComplexX87,
2197 NoClass,
2198 Memory
2199 };
2200
2201 /// merge - Implement the X86_64 ABI merging algorithm.
2202 ///
2203 /// Merge an accumulating classification \arg Accum with a field
2204 /// classification \arg Field.
2205 ///
2206 /// \param Accum - The accumulating classification. This should
2207 /// always be either NoClass or the result of a previous merge
2208 /// call. In addition, this should never be Memory (the caller
2209 /// should just return Memory for the aggregate).
2210 static Class merge(Class Accum, Class Field);
2211
2212 /// postMerge - Implement the X86_64 ABI post merging algorithm.
2213 ///
2214 /// Post merger cleanup, reduces a malformed Hi and Lo pair to
2215 /// final MEMORY or SSE classes when necessary.
2216 ///
2217 /// \param AggregateSize - The size of the current aggregate in
2218 /// the classification process.
2219 ///
2220 /// \param Lo - The classification for the parts of the type
2221 /// residing in the low word of the containing object.
2222 ///
2223 /// \param Hi - The classification for the parts of the type
2224 /// residing in the higher words of the containing object.
2225 ///
2226 void postMerge(unsigned AggregateSize, Class &Lo, Class &Hi) const;
2227
2228 /// classify - Determine the x86_64 register classes in which the
2229 /// given type T should be passed.
2230 ///
2231 /// \param Lo - The classification for the parts of the type
2232 /// residing in the low word of the containing object.
2233 ///
2234 /// \param Hi - The classification for the parts of the type
2235 /// residing in the high word of the containing object.
2236 ///
2237 /// \param OffsetBase - The bit offset of this type in the
2238 /// containing object. Some parameters are classified different
2239 /// depending on whether they straddle an eightbyte boundary.
2240 ///
2241 /// \param isNamedArg - Whether the argument in question is a "named"
2242 /// argument, as used in AMD64-ABI 3.5.7.
2243 ///
2244 /// If a word is unused its result will be NoClass; if a type should
2245 /// be passed in Memory then at least the classification of \arg Lo
2246 /// will be Memory.
2247 ///
2248 /// The \arg Lo class will be NoClass iff the argument is ignored.
2249 ///
2250 /// If the \arg Lo class is ComplexX87, then the \arg Hi class will
2251 /// also be ComplexX87.
2252 void classify(QualType T, uint64_t OffsetBase, Class &Lo, Class &Hi,
2253 bool isNamedArg) const;
2254
2255 llvm::Type *GetByteVectorType(QualType Ty) const;
2256 llvm::Type *GetSSETypeAtOffset(llvm::Type *IRType,
2257 unsigned IROffset, QualType SourceTy,
2258 unsigned SourceOffset) const;
2259 llvm::Type *GetINTEGERTypeAtOffset(llvm::Type *IRType,
2260 unsigned IROffset, QualType SourceTy,
2261 unsigned SourceOffset) const;
2262
2263 /// getIndirectReturnResult - Given a source type \arg Ty, return a suitable
2264 /// result such that the argument will be returned in memory.
2265 ABIArgInfo getIndirectReturnResult(QualType Ty) const;
2266
2267 /// getIndirectResult - Given a source type \arg Ty, return a suitable result
2268 /// such that the argument will be passed in memory.
2269 ///
2270 /// \param freeIntRegs - The number of free integer registers remaining
2271 /// available.
2272 ABIArgInfo getIndirectResult(QualType Ty, unsigned freeIntRegs) const;
2273
2274 ABIArgInfo classifyReturnType(QualType RetTy) const;
2275
2276 ABIArgInfo classifyArgumentType(QualType Ty, unsigned freeIntRegs,
2277 unsigned &neededInt, unsigned &neededSSE,
2278 bool isNamedArg) const;
2279
2280 ABIArgInfo classifyRegCallStructType(QualType Ty, unsigned &NeededInt,
2281 unsigned &NeededSSE) const;
2282
2283 ABIArgInfo classifyRegCallStructTypeImpl(QualType Ty, unsigned &NeededInt,
2284 unsigned &NeededSSE) const;
2285
2286 bool IsIllegalVectorType(QualType Ty) const;
2287
2288 /// The 0.98 ABI revision clarified a lot of ambiguities,
2289 /// unfortunately in ways that were not always consistent with
2290 /// certain previous compilers. In particular, platforms which
2291 /// required strict binary compatibility with older versions of GCC
2292 /// may need to exempt themselves.
2293 bool honorsRevision0_98() const {
2294 return !getTarget().getTriple().isOSDarwin();
2295 }
2296
2297 /// GCC classifies <1 x long long> as SSE but some platform ABIs choose to
2298 /// classify it as INTEGER (for compatibility with older clang compilers).
2299 bool classifyIntegerMMXAsSSE() const {
2300 // Clang <= 3.8 did not do this.
2301 if (getContext().getLangOpts().getClangABICompat() <=
2302 LangOptions::ClangABI::Ver3_8)
2303 return false;
2304
2305 const llvm::Triple &Triple = getTarget().getTriple();
2306 if (Triple.isOSDarwin() || Triple.getOS() == llvm::Triple::PS4)
2307 return false;
2308 if (Triple.isOSFreeBSD() && Triple.getOSMajorVersion() >= 10)
2309 return false;
2310 return true;
2311 }
2312
2313 // GCC classifies vectors of __int128 as memory.
2314 bool passInt128VectorsInMem() const {
2315 // Clang <= 9.0 did not do this.
2316 if (getContext().getLangOpts().getClangABICompat() <=
2317 LangOptions::ClangABI::Ver9)
2318 return false;
2319
2320 const llvm::Triple &T = getTarget().getTriple();
2321 return T.isOSLinux() || T.isOSNetBSD();
2322 }
2323
2324 X86AVXABILevel AVXLevel;
2325 // Some ABIs (e.g. the X32 ABI and Native Client OS) use 32-bit pointers on
2326 // 64-bit hardware.
2327 bool Has64BitPointers;
2328
2329 public:
2330 X86_64ABIInfo(CodeGen::CodeGenTypes &CGT, X86AVXABILevel AVXLevel) :
2331 SwiftABIInfo(CGT), AVXLevel(AVXLevel),
2332 Has64BitPointers(CGT.getDataLayout().getPointerSize(0) == 8) {
2333 }
2334
2335 bool isPassedUsingAVXType(QualType type) const {
2336 unsigned neededInt, neededSSE;
2337 // The freeIntRegs argument doesn't matter here.
2338 ABIArgInfo info = classifyArgumentType(type, 0, neededInt, neededSSE,
2339 /*isNamedArg*/true);
2340 if (info.isDirect()) {
2341 llvm::Type *ty = info.getCoerceToType();
2342 if (llvm::VectorType *vectorTy = dyn_cast_or_null<llvm::VectorType>(ty))
2343 return vectorTy->getPrimitiveSizeInBits().getFixedSize() > 128;
2344 }
2345 return false;
2346 }
2347
2348 void computeInfo(CGFunctionInfo &FI) const override;
2349
2350 Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
2351 QualType Ty) const override;
2352 Address EmitMSVAArg(CodeGenFunction &CGF, Address VAListAddr,
2353 QualType Ty) const override;
2354
2355 bool has64BitPointers() const {
2356 return Has64BitPointers;
2357 }
2358
2359 bool shouldPassIndirectlyForSwift(ArrayRef<llvm::Type*> scalars,
2360 bool asReturnValue) const override {
2361 return occupiesMoreThan(CGT, scalars, /*total*/ 4);
2362 }
2363 bool isSwiftErrorInRegister() const override {
2364 return true;
2365 }
2366 };
2367
2368 /// WinX86_64ABIInfo - The Windows X86_64 ABI information.
2369 class WinX86_64ABIInfo : public SwiftABIInfo {
2370 public:
2371 WinX86_64ABIInfo(CodeGen::CodeGenTypes &CGT, X86AVXABILevel AVXLevel)
2372 : SwiftABIInfo(CGT), AVXLevel(AVXLevel),
2373 IsMingw64(getTarget().getTriple().isWindowsGNUEnvironment()) {}
2374
2375 void computeInfo(CGFunctionInfo &FI) const override;
2376
2377 Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
2378 QualType Ty) const override;
2379
2380 bool isHomogeneousAggregateBaseType(QualType Ty) const override {
2381 // FIXME: Assumes vectorcall is in use.
2382 return isX86VectorTypeForVectorCall(getContext(), Ty);
2383 }
2384
2385 bool isHomogeneousAggregateSmallEnough(const Type *Ty,
2386 uint64_t NumMembers) const override {
2387 // FIXME: Assumes vectorcall is in use.
2388 return isX86VectorCallAggregateSmallEnough(NumMembers);
2389 }
2390
2391 bool shouldPassIndirectlyForSwift(ArrayRef<llvm::Type *> scalars,
2392 bool asReturnValue) const override {
2393 return occupiesMoreThan(CGT, scalars, /*total*/ 4);
2394 }
2395
2396 bool isSwiftErrorInRegister() const override {
2397 return true;
2398 }
2399
2400 private:
2401 ABIArgInfo classify(QualType Ty, unsigned &FreeSSERegs, bool IsReturnType,
2402 bool IsVectorCall, bool IsRegCall) const;
2403 ABIArgInfo reclassifyHvaArgForVectorCall(QualType Ty, unsigned &FreeSSERegs,
2404 const ABIArgInfo &current) const;
2405
2406 X86AVXABILevel AVXLevel;
2407
2408 bool IsMingw64;
2409 };
2410
2411 class X86_64TargetCodeGenInfo : public TargetCodeGenInfo {
2412 public:
2413 X86_64TargetCodeGenInfo(CodeGen::CodeGenTypes &CGT, X86AVXABILevel AVXLevel)
2414 : TargetCodeGenInfo(std::make_unique<X86_64ABIInfo>(CGT, AVXLevel)) {}
2415
2416 const X86_64ABIInfo &getABIInfo() const {
2417 return static_cast<const X86_64ABIInfo&>(TargetCodeGenInfo::getABIInfo());
2418 }
2419
2420 /// Disable tail call on x86-64. The epilogue code before the tail jump blocks
2421 /// autoreleaseRV/retainRV and autoreleaseRV/unsafeClaimRV optimizations.
2422 bool markARCOptimizedReturnCallsAsNoTail() const override { return true; }
2423
2424 int getDwarfEHStackPointer(CodeGen::CodeGenModule &CGM) const override {
2425 return 7;
2426 }
2427
2428 bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
2429 llvm::Value *Address) const override {
2430 llvm::Value *Eight8 = llvm::ConstantInt::get(CGF.Int8Ty, 8);
2431
2432 // 0-15 are the 16 integer registers.
2433 // 16 is %rip.
2434 AssignToArrayRange(CGF.Builder, Address, Eight8, 0, 16);
2435 return false;
2436 }
2437
2438 llvm::Type* adjustInlineAsmType(CodeGen::CodeGenFunction &CGF,
2439 StringRef Constraint,
2440 llvm::Type* Ty) const override {
2441 return X86AdjustInlineAsmType(CGF, Constraint, Ty);
2442 }
2443
2444 bool isNoProtoCallVariadic(const CallArgList &args,
2445 const FunctionNoProtoType *fnType) const override {
2446 // The default CC on x86-64 sets %al to the number of SSE
2447 // registers used, and GCC sets this when calling an unprototyped
2448 // function, so we override the default behavior. However, don't do
2449 // that when AVX types are involved: the ABI explicitly states it is
2450 // undefined, and it doesn't work in practice because of how the ABI
2451 // defines varargs anyway.
2452 if (fnType->getCallConv() == CC_C) {
2453 bool HasAVXType = false;
2454 for (CallArgList::const_iterator
2455 it = args.begin(), ie = args.end(); it != ie; ++it) {
2456 if (getABIInfo().isPassedUsingAVXType(it->Ty)) {
2457 HasAVXType = true;
2458 break;
2459 }
2460 }
2461
2462 if (!HasAVXType)
2463 return true;
2464 }
2465
2466 return TargetCodeGenInfo::isNoProtoCallVariadic(args, fnType);
2467 }
2468
2469 llvm::Constant *
2470 getUBSanFunctionSignature(CodeGen::CodeGenModule &CGM) const override {
2471 unsigned Sig = (0xeb << 0) | // jmp rel8
2472 (0x06 << 8) | // .+0x08
2473 ('v' << 16) |
2474 ('2' << 24);
2475 return llvm::ConstantInt::get(CGM.Int32Ty, Sig);
2476 }
2477
2478 void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
2479 CodeGen::CodeGenModule &CGM) const override {
2480 if (GV->isDeclaration())
2481 return;
2482 if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D)) {
2483 if (FD->hasAttr<X86ForceAlignArgPointerAttr>()) {
2484 llvm::Function *Fn = cast<llvm::Function>(GV);
2485 Fn->addFnAttr("stackrealign");
2486 }
2487
2488 addX86InterruptAttrs(FD, GV, CGM);
2489 }
2490 }
2491
2492 void checkFunctionCallABI(CodeGenModule &CGM, SourceLocation CallLoc,
2493 const FunctionDecl *Caller,
2494 const FunctionDecl *Callee,
2495 const CallArgList &Args) const override;
2496 };
2497
2498 static void initFeatureMaps(const ASTContext &Ctx,
2499 llvm::StringMap<bool> &CallerMap,
2500 const FunctionDecl *Caller,
2501 llvm::StringMap<bool> &CalleeMap,
2502 const FunctionDecl *Callee) {
2503 if (CalleeMap.empty() && CallerMap.empty()) {
2504 // The caller is potentially nullptr in the case where the call isn't in a
2505 // function. In this case, the getFunctionFeatureMap ensures we just get
2506 // the TU level setting (since it cannot be modified by 'target').
2507 Ctx.getFunctionFeatureMap(CallerMap, Caller);
2508 Ctx.getFunctionFeatureMap(CalleeMap, Callee);
2509 }
2510 }
2511
2512 static bool checkAVXParamFeature(DiagnosticsEngine &Diag,
2513 SourceLocation CallLoc,
2514 const llvm::StringMap<bool> &CallerMap,
2515 const llvm::StringMap<bool> &CalleeMap,
2516 QualType Ty, StringRef Feature,
2517 bool IsArgument) {
2518 bool CallerHasFeat = CallerMap.lookup(Feature);
2519 bool CalleeHasFeat = CalleeMap.lookup(Feature);
2520 if (!CallerHasFeat && !CalleeHasFeat)
2521 return Diag.Report(CallLoc, diag::warn_avx_calling_convention)
2522 << IsArgument << Ty << Feature;
2523
2524 // Mixing calling conventions here is very clearly an error.
2525 if (!CallerHasFeat || !CalleeHasFeat)
2526 return Diag.Report(CallLoc, diag::err_avx_calling_convention)
2527 << IsArgument << Ty << Feature;
2528
2529 // Else, both caller and callee have the required feature, so there is no need
2530 // to diagnose.
2531 return false;
2532 }
2533
2534 static bool checkAVXParam(DiagnosticsEngine &Diag, ASTContext &Ctx,
2535 SourceLocation CallLoc,
2536 const llvm::StringMap<bool> &CallerMap,
2537 const llvm::StringMap<bool> &CalleeMap, QualType Ty,
2538 bool IsArgument) {
2539 uint64_t Size = Ctx.getTypeSize(Ty);
2540 if (Size > 256)
2541 return checkAVXParamFeature(Diag, CallLoc, CallerMap, CalleeMap, Ty,
2542 "avx512f", IsArgument);
2543
2544 if (Size > 128)
2545 return checkAVXParamFeature(Diag, CallLoc, CallerMap, CalleeMap, Ty, "avx",
2546 IsArgument);
2547
2548 return false;
2549 }
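// Illustrative diagnostics (hypothetical call): passing an __m256 argument
// when neither caller nor callee was compiled with "avx" triggers
// warn_avx_calling_convention; if exactly one side has the feature, the
// mismatch is diagnosed as err_avx_calling_convention.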
2550
2551 void X86_64TargetCodeGenInfo::checkFunctionCallABI(
2552 CodeGenModule &CGM, SourceLocation CallLoc, const FunctionDecl *Caller,
2553 const FunctionDecl *Callee, const CallArgList &Args) const {
2554 llvm::StringMap<bool> CallerMap;
2555 llvm::StringMap<bool> CalleeMap;
2556 unsigned ArgIndex = 0;
2557
2558 // We need to loop through the actual call arguments rather than the
2559 // function's parameters, in case this call is variadic.
2560 for (const CallArg &Arg : Args) {
2561 // The "avx" feature changes how vectors >128 in size are passed. "avx512f"
2562 // additionally changes how vectors >256 in size are passed. Like GCC, we
2563 // warn when a function is called with an argument where this will change.
2564 // Unlike GCC, we also error when it is an obvious ABI mismatch, that is,
2565 // the caller and callee features are mismatched.
2566 // Unfortunately, we cannot do this diagnostic in SEMA, since the callee can
2567 // change its ABI with attribute-target after this call.
2568 if (Arg.getType()->isVectorType() &&
2569 CGM.getContext().getTypeSize(Arg.getType()) > 128) {
2570 initFeatureMaps(CGM.getContext(), CallerMap, Caller, CalleeMap, Callee);
2571 QualType Ty = Arg.getType();
2572 // The CallArg seems to have desugared the type already, so for clearer
2573 // diagnostics, replace it with the type in the FunctionDecl if possible.
2574 if (ArgIndex < Callee->getNumParams())
2575 Ty = Callee->getParamDecl(ArgIndex)->getType();
2576
2577 if (checkAVXParam(CGM.getDiags(), CGM.getContext(), CallLoc, CallerMap,
2578 CalleeMap, Ty, /*IsArgument*/ true))
2579 return;
2580 }
2581 ++ArgIndex;
2582 }
2583
2584 // Check return always, as we don't have a good way of knowing in codegen
2585 // whether this value is used, tail-called, etc.
2586 if (Callee->getReturnType()->isVectorType() &&
2587 CGM.getContext().getTypeSize(Callee->getReturnType()) > 128) {
2588 initFeatureMaps(CGM.getContext(), CallerMap, Caller, CalleeMap, Callee);
2589 checkAVXParam(CGM.getDiags(), CGM.getContext(), CallLoc, CallerMap,
2590 CalleeMap, Callee->getReturnType(),
2591 /*IsArgument*/ false);
2592 }
2593 }
2594
2595 static std::string qualifyWindowsLibrary(llvm::StringRef Lib) {
2596 // If the argument does not end in .lib, automatically add the suffix.
2597 // If the argument contains a space, enclose it in quotes.
2598 // This matches the behavior of MSVC.
2599 bool Quote = (Lib.find(' ') != StringRef::npos);
2600 std::string ArgStr = Quote ? "\"" : "";
2601 ArgStr += Lib;
2602 if (!Lib.endswith_lower(".lib") && !Lib.endswith_lower(".a"))
2603 ArgStr += ".lib";
2604 ArgStr += Quote ? "\"" : "";
2605 return ArgStr;
2606 }
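// Illustrative results: qualifyWindowsLibrary("msvcrt") == "msvcrt.lib",
// qualifyWindowsLibrary("my lib") == "\"my lib.lib\"", and names already
// ending in ".lib" or ".a" (case-insensitively) are left unsuffixed.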
2607
2608 class WinX86_32TargetCodeGenInfo : public X86_32TargetCodeGenInfo {
2609 public:
2610 WinX86_32TargetCodeGenInfo(CodeGen::CodeGenTypes &CGT,
2611 bool DarwinVectorABI, bool RetSmallStructInRegABI, bool Win32StructABI,
2612 unsigned NumRegisterParameters)
2613 : X86_32TargetCodeGenInfo(CGT, DarwinVectorABI, RetSmallStructInRegABI,
2614 Win32StructABI, NumRegisterParameters, false) {}
2615
2616 void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
2617 CodeGen::CodeGenModule &CGM) const override;
2618
2619 void getDependentLibraryOption(llvm::StringRef Lib,
2620 llvm::SmallString<24> &Opt) const override {
2621 Opt = "/DEFAULTLIB:";
2622 Opt += qualifyWindowsLibrary(Lib);
2623 }
2624
2625 void getDetectMismatchOption(llvm::StringRef Name,
2626 llvm::StringRef Value,
2627 llvm::SmallString<32> &Opt) const override {
2628 Opt = "/FAILIFMISMATCH:\"" + Name.str() + "=" + Value.str() + "\"";
2629 }
2630 };
2631
2632 static void addStackProbeTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
2633 CodeGen::CodeGenModule &CGM) {
2634 if (llvm::Function *Fn = dyn_cast_or_null<llvm::Function>(GV)) {
2635
2636 if (CGM.getCodeGenOpts().StackProbeSize != 4096)
2637 Fn->addFnAttr("stack-probe-size",
2638 llvm::utostr(CGM.getCodeGenOpts().StackProbeSize));
2639 if (CGM.getCodeGenOpts().NoStackArgProbe)
2640 Fn->addFnAttr("no-stack-arg-probe");
2641 }
2642 }
2643
2644 void WinX86_32TargetCodeGenInfo::setTargetAttributes(
2645 const Decl *D, llvm::GlobalValue *GV, CodeGen::CodeGenModule &CGM) const {
2646 X86_32TargetCodeGenInfo::setTargetAttributes(D, GV, CGM);
2647 if (GV->isDeclaration())
2648 return;
2649 addStackProbeTargetAttributes(D, GV, CGM);
2650 }
2651
2652 class WinX86_64TargetCodeGenInfo : public TargetCodeGenInfo {
2653 public:
2654 WinX86_64TargetCodeGenInfo(CodeGen::CodeGenTypes &CGT,
2655 X86AVXABILevel AVXLevel)
2656 : TargetCodeGenInfo(std::make_unique<WinX86_64ABIInfo>(CGT, AVXLevel)) {}
2657
2658 void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
2659 CodeGen::CodeGenModule &CGM) const override;
2660
2661 int getDwarfEHStackPointer(CodeGen::CodeGenModule &CGM) const override {
2662 return 7;
2663 }
2664
2665 bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
2666 llvm::Value *Address) const override {
2667 llvm::Value *Eight8 = llvm::ConstantInt::get(CGF.Int8Ty, 8);
2668
2669 // 0-15 are the 16 integer registers.
2670 // 16 is %rip.
2671 AssignToArrayRange(CGF.Builder, Address, Eight8, 0, 16);
2672 return false;
2673 }
2674
2675 void getDependentLibraryOption(llvm::StringRef Lib,
2676 llvm::SmallString<24> &Opt) const override {
2677 Opt = "/DEFAULTLIB:";
2678 Opt += qualifyWindowsLibrary(Lib);
2679 }
2680
2681 void getDetectMismatchOption(llvm::StringRef Name,
2682 llvm::StringRef Value,
2683 llvm::SmallString<32> &Opt) const override {
2684 Opt = "/FAILIFMISMATCH:\"" + Name.str() + "=" + Value.str() + "\"";
2685 }
2686 };
2687
2688 void WinX86_64TargetCodeGenInfo::setTargetAttributes(
2689 const Decl *D, llvm::GlobalValue *GV, CodeGen::CodeGenModule &CGM) const {
2690 TargetCodeGenInfo::setTargetAttributes(D, GV, CGM);
2691 if (GV->isDeclaration())
2692 return;
2693 if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D)) {
2694 if (FD->hasAttr<X86ForceAlignArgPointerAttr>()) {
2695 llvm::Function *Fn = cast<llvm::Function>(GV);
2696 Fn->addFnAttr("stackrealign");
2697 }
2698
2699 addX86InterruptAttrs(FD, GV, CGM);
2700 }
2701
2702 addStackProbeTargetAttributes(D, GV, CGM);
2703 }
2704 }
2705
2706 void X86_64ABIInfo::postMerge(unsigned AggregateSize, Class &Lo,
2707 Class &Hi) const {
2708 // AMD64-ABI 3.2.3p2: Rule 5. Then a post merger cleanup is done:
2709 //
2710 // (a) If one of the classes is Memory, the whole argument is passed in
2711 // memory.
2712 //
2713 // (b) If X87UP is not preceded by X87, the whole argument is passed in
2714 // memory.
2715 //
2716 // (c) If the size of the aggregate exceeds two eightbytes and the first
2717 // eightbyte isn't SSE or any other eightbyte isn't SSEUP, the whole
2718 // argument is passed in memory. NOTE: This is necessary to keep the
2719 // ABI working for processors that don't support the __m256 type.
2720 //
2721 // (d) If SSEUP is not preceded by SSE or SSEUP, it is converted to SSE.
2722 //
2723 // Some of these are enforced by the merging logic. Others can arise
2724 // only with unions; for example:
2725 // union { _Complex double; unsigned; }
2726 //
2727 // Note that clauses (b) and (c) were added in 0.98.
2728 //
2729 if (Hi == Memory)
2730 Lo = Memory;
2731 if (Hi == X87Up && Lo != X87 && honorsRevision0_98())
2732 Lo = Memory;
2733 if (AggregateSize > 128 && (Lo != SSE || Hi != SSEUp))
2734 Lo = Memory;
2735 if (Hi == SSEUp && Lo != SSE)
2736 Hi = SSE;
2737 }
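// Illustrative example (hypothetical type): for union { __m128 v; long long
// l; }, merging yields Lo=Integer and Hi=SSEUp; rule (d) above then rewrites
// Hi to SSE since it is no longer preceded by SSE, so the union travels in
// one GPR plus one XMM register.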
2738
2739 X86_64ABIInfo::Class X86_64ABIInfo::merge(Class Accum, Class Field) {
2740 // AMD64-ABI 3.2.3p2: Rule 4. Each field of an object is
2741 // classified recursively so that always two fields are
2742 // considered. The resulting class is calculated according to
2743 // the classes of the fields in the eightbyte:
2744 //
2745 // (a) If both classes are equal, this is the resulting class.
2746 //
2747 // (b) If one of the classes is NO_CLASS, the resulting class is
2748 // the other class.
2749 //
2750 // (c) If one of the classes is MEMORY, the result is the MEMORY
2751 // class.
2752 //
2753 // (d) If one of the classes is INTEGER, the result is the
2754 // INTEGER.
2755 //
2756 // (e) If one of the classes is X87, X87UP, COMPLEX_X87 class,
2757 // MEMORY is used as class.
2758 //
2759 // (f) Otherwise class SSE is used.
2760
2761 // Accum should never be memory (we should have returned) or
2762 // ComplexX87 (because this cannot be passed in a structure).
2763 assert((Accum != Memory && Accum != ComplexX87) &&
2764 "Invalid accumulated classification during merge.");
2765 if (Accum == Field || Field == NoClass)
2766 return Accum;
2767 if (Field == Memory)
2768 return Memory;
2769 if (Accum == NoClass)
2770 return Field;
2771 if (Accum == Integer || Field == Integer)
2772 return Integer;
2773 if (Field == X87 || Field == X87Up || Field == ComplexX87 ||
2774 Accum == X87 || Accum == X87Up)
2775 return Memory;
2776 return SSE;
2777 }
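// Illustrative results of the rules above: merge(NoClass, SSE) == SSE
// (rule b), merge(Integer, SSE) == Integer (rule d), and
// merge(SSE, X87) == Memory (rule e).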
2778
2779 void X86_64ABIInfo::classify(QualType Ty, uint64_t OffsetBase,
2780 Class &Lo, Class &Hi, bool isNamedArg) const {
2781 // FIXME: This code can be simplified by introducing a simple value class for
2782 // Class pairs with appropriate constructor methods for the various
2783 // situations.
2784
2785 // FIXME: Some of the split computations are wrong; unaligned vectors
2786 // shouldn't be passed in registers for example, so there is no chance they
2787 // can straddle an eightbyte. Verify & simplify.
2788
2789 Lo = Hi = NoClass;
2790
2791 Class &Current = OffsetBase < 64 ? Lo : Hi;
2792 Current = Memory;
2793
2794 if (const BuiltinType *BT = Ty->getAs<BuiltinType>()) {
2795 BuiltinType::Kind k = BT->getKind();
2796
2797 if (k == BuiltinType::Void) {
2798 Current = NoClass;
2799 } else if (k == BuiltinType::Int128 || k == BuiltinType::UInt128) {
2800 Lo = Integer;
2801 Hi = Integer;
2802 } else if (k >= BuiltinType::Bool && k <= BuiltinType::LongLong) {
2803 Current = Integer;
2804 } else if (k == BuiltinType::Float || k == BuiltinType::Double) {
2805 Current = SSE;
2806 } else if (k == BuiltinType::LongDouble) {
2807 const llvm::fltSemantics *LDF = &getTarget().getLongDoubleFormat();
2808 if (LDF == &llvm::APFloat::IEEEquad()) {
2809 Lo = SSE;
2810 Hi = SSEUp;
2811 } else if (LDF == &llvm::APFloat::x87DoubleExtended()) {
2812 Lo = X87;
2813 Hi = X87Up;
2814 } else if (LDF == &llvm::APFloat::IEEEdouble()) {
2815 Current = SSE;
2816 } else
2817 llvm_unreachable("unexpected long double representation!");
2818 }
2819 // FIXME: _Decimal32 and _Decimal64 are SSE.
2820 // FIXME: _float128 and _Decimal128 are (SSE, SSEUp).
2821 return;
2822 }
2823
2824 if (const EnumType *ET = Ty->getAs<EnumType>()) {
2825 // Classify the underlying integer type.
2826 classify(ET->getDecl()->getIntegerType(), OffsetBase, Lo, Hi, isNamedArg);
2827 return;
2828 }
2829
2830 if (Ty->hasPointerRepresentation()) {
2831 Current = Integer;
2832 return;
2833 }
2834
2835 if (Ty->isMemberPointerType()) {
2836 if (Ty->isMemberFunctionPointerType()) {
2837 if (Has64BitPointers) {
2838 // If Has64BitPointers, this is an {i64, i64}, so classify both
2839 // Lo and Hi now.
2840 Lo = Hi = Integer;
2841 } else {
2842 // Otherwise, with 32-bit pointers, this is an {i32, i32}. If that
2843 // straddles an eightbyte boundary, Hi should be classified as well.
2844 uint64_t EB_FuncPtr = (OffsetBase) / 64;
2845 uint64_t EB_ThisAdj = (OffsetBase + 64 - 1) / 64;
2846 if (EB_FuncPtr != EB_ThisAdj) {
2847 Lo = Hi = Integer;
2848 } else {
2849 Current = Integer;
2850 }
2851 }
2852 } else {
2853 Current = Integer;
2854 }
2855 return;
2856 }
2857
2858 if (const VectorType *VT = Ty->getAs<VectorType>()) {
2859 uint64_t Size = getContext().getTypeSize(VT);
2860 if (Size == 1 || Size == 8 || Size == 16 || Size == 32) {
2861 // gcc passes the following as integer:
2862 // 4 bytes - <4 x char>, <2 x short>, <1 x int>, <1 x float>
2863 // 2 bytes - <2 x char>, <1 x short>
2864 // 1 byte - <1 x char>
2865 Current = Integer;
2866
2867 // If this type crosses an eightbyte boundary, it should be
2868 // split.
2869 uint64_t EB_Lo = (OffsetBase) / 64;
2870 uint64_t EB_Hi = (OffsetBase + Size - 1) / 64;
2871 if (EB_Lo != EB_Hi)
2872 Hi = Lo;
2873 } else if (Size == 64) {
2874 QualType ElementType = VT->getElementType();
2875
2876 // gcc passes <1 x double> in memory. :(
2877 if (ElementType->isSpecificBuiltinType(BuiltinType::Double))
2878 return;
2879
2880 // gcc passes <1 x long long> as SSE but clang used to unconditionally
2881 // pass them as integer. For platforms where clang is the de facto
2882 // platform compiler, we must continue to use integer.
2883 if (!classifyIntegerMMXAsSSE() &&
2884 (ElementType->isSpecificBuiltinType(BuiltinType::LongLong) ||
2885 ElementType->isSpecificBuiltinType(BuiltinType::ULongLong) ||
2886 ElementType->isSpecificBuiltinType(BuiltinType::Long) ||
2887 ElementType->isSpecificBuiltinType(BuiltinType::ULong)))
2888 Current = Integer;
2889 else
2890 Current = SSE;
2891
2892 // If this type crosses an eightbyte boundary, it should be
2893 // split.
2894 if (OffsetBase && OffsetBase != 64)
2895 Hi = Lo;
2896 } else if (Size == 128 ||
2897 (isNamedArg && Size <= getNativeVectorSizeForAVXABI(AVXLevel))) {
2898 QualType ElementType = VT->getElementType();
2899
2900 // gcc passes 256 and 512 bit <X x __int128> vectors in memory. :(
2901 if (passInt128VectorsInMem() && Size != 128 &&
2902 (ElementType->isSpecificBuiltinType(BuiltinType::Int128) ||
2903 ElementType->isSpecificBuiltinType(BuiltinType::UInt128)))
2904 return;
2905
2906 // Arguments of 256-bits are split into four eightbyte chunks. The
2907 // least significant one belongs to class SSE and all the others to class
2908 // SSEUP. The original Lo and Hi design considers that types can't be
2909 // greater than 128-bits, so a 64-bit split in Hi and Lo makes sense.
2910 // This design isn't correct for 256-bits, but since there're no cases
2911 // where the upper parts would need to be inspected, avoid adding
2912 // complexity and just consider Hi to match the 64-256 part.
2913 //
2914 // Note that per 3.5.7 of AMD64-ABI, 256-bit args are only passed in
2915 // registers if they are "named", i.e. not part of the "..." of a
2916 // variadic function.
2917 //
2918 // Similarly, per 3.2.3. of the AVX512 draft, 512-bits ("named") args are
2919 // split into eight eightbyte chunks, one SSE and seven SSEUP.
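      //
      // For example (illustrative): a named __m256 argument classifies as
      // (SSE, SSEUp) and travels in a single YMM register, while the same
      // vector passed through the "..." of a variadic call stays Memory.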
      Lo = SSE;
      Hi = SSEUp;
    }
    return;
  }

  if (const ComplexType *CT = Ty->getAs<ComplexType>()) {
    QualType ET = getContext().getCanonicalType(CT->getElementType());

    uint64_t Size = getContext().getTypeSize(Ty);
    if (ET->isIntegralOrEnumerationType()) {
      if (Size <= 64)
        Current = Integer;
      else if (Size <= 128)
        Lo = Hi = Integer;
    } else if (ET == getContext().FloatTy) {
      Current = SSE;
    } else if (ET == getContext().DoubleTy) {
      Lo = Hi = SSE;
    } else if (ET == getContext().LongDoubleTy) {
      const llvm::fltSemantics *LDF = &getTarget().getLongDoubleFormat();
      if (LDF == &llvm::APFloat::IEEEquad())
        Current = Memory;
      else if (LDF == &llvm::APFloat::x87DoubleExtended())
        Current = ComplexX87;
      else if (LDF == &llvm::APFloat::IEEEdouble())
        Lo = Hi = SSE;
      else
        llvm_unreachable("unexpected long double representation!");
    }
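    // For example (illustrative): _Complex float is eight bytes, so both
    // parts share one SSE eightbyte, whereas _Complex double becomes
    // Lo = Hi = SSE with one eightbyte per part.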

    // If this complex type crosses an eightbyte boundary then it
    // should be split.
    uint64_t EB_Real = (OffsetBase) / 64;
    uint64_t EB_Imag = (OffsetBase + getContext().getTypeSize(ET)) / 64;
    if (Hi == NoClass && EB_Real != EB_Imag)
      Hi = Lo;

    return;
  }

  if (const auto *EITy = Ty->getAs<ExtIntType>()) {
    if (EITy->getNumBits() <= 64)
      Current = Integer;
    else if (EITy->getNumBits() <= 128)
      Lo = Hi = Integer;
    // Larger values need to get passed in memory.
    return;
  }

  if (const ConstantArrayType *AT = getContext().getAsConstantArrayType(Ty)) {
    // Arrays are treated like structures.
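    // For example (illustrative): a field declared double d[2] merges its
    // two elements at offsets 0 and 64, yielding Lo = Hi = SSE.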

    uint64_t Size = getContext().getTypeSize(Ty);

    // AMD64-ABI 3.2.3p2: Rule 1. If the size of an object is larger
    // than eight eightbytes, ..., it has class MEMORY.
    if (Size > 512)
      return;

    // AMD64-ABI 3.2.3p2: Rule 1. If ..., or it contains unaligned
    // fields, it has class MEMORY.
    //
    // Only need to check alignment of array base.
    if (OffsetBase % getContext().getTypeAlign(AT->getElementType()))
      return;

    // Otherwise implement simplified merge. We could be smarter about
    // this, but it isn't worth it and would be harder to verify.
    Current = NoClass;
    uint64_t EltSize = getContext().getTypeSize(AT->getElementType());
    uint64_t ArraySize = AT->getSize().getZExtValue();

    // The only case a 256-bit wide vector could be used is when the array
    // contains a single 256-bit element. Since Lo and Hi logic isn't extended
    // to work for sizes wider than 128, early check and fallback to memory.
    //
    if (Size > 128 &&
        (Size != EltSize || Size > getNativeVectorSizeForAVXABI(AVXLevel)))
      return;

    for (uint64_t i=0, Offset=OffsetBase; i<ArraySize; ++i, Offset += EltSize) {
      Class FieldLo, FieldHi;
      classify(AT->getElementType(), Offset, FieldLo, FieldHi, isNamedArg);
      Lo = merge(Lo, FieldLo);
      Hi = merge(Hi, FieldHi);
      if (Lo == Memory || Hi == Memory)
        break;
    }

    postMerge(Size, Lo, Hi);
    assert((Hi != SSEUp || Lo == SSE) && "Invalid SSEUp array classification.");
    return;
  }

  if (const RecordType *RT = Ty->getAs<RecordType>()) {
    uint64_t Size = getContext().getTypeSize(Ty);

    // AMD64-ABI 3.2.3p2: Rule 1. If the size of an object is larger
    // than eight eightbytes, ..., it has class MEMORY.
    if (Size > 512)
      return;

    // AMD64-ABI 3.2.3p2: Rule 2. If a C++ object has either a non-trivial
    // copy constructor or a non-trivial destructor, it is passed by invisible
    // reference.
    if (getRecordArgABI(RT, getCXXABI()))
      return;

    const RecordDecl *RD = RT->getDecl();

    // Assume variable sized types are passed in memory.
    if (RD->hasFlexibleArrayMember())
      return;

    const ASTRecordLayout &Layout = getContext().getASTRecordLayout(RD);

    // Reset Lo class, this will be recomputed.
    Current = NoClass;

    // If this is a C++ record, classify the bases first.
    if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD)) {
      for (const auto &I : CXXRD->bases()) {
        assert(!I.isVirtual() && !I.getType()->isDependentType() &&
               "Unexpected base class!");
        const auto *Base =
            cast<CXXRecordDecl>(I.getType()->castAs<RecordType>()->getDecl());

        // Classify this field.
        //
        // AMD64-ABI 3.2.3p2: Rule 3. If the size of the aggregate exceeds a
        // single eightbyte, each is classified separately. Each eightbyte gets
        // initialized to class NO_CLASS.
        Class FieldLo, FieldHi;
        uint64_t Offset =
            OffsetBase + getContext().toBits(Layout.getBaseClassOffset(Base));
        classify(I.getType(), Offset, FieldLo, FieldHi, isNamedArg);
        Lo = merge(Lo, FieldLo);
        Hi = merge(Hi, FieldHi);
        if (Lo == Memory || Hi == Memory) {
          postMerge(Size, Lo, Hi);
          return;
        }
      }
    }

    // Classify the fields one at a time, merging the results.
    unsigned idx = 0;
    bool UseClang11Compat = getContext().getLangOpts().getClangABICompat() <=
                                LangOptions::ClangABI::Ver11 ||
                            getContext().getTargetInfo().getTriple().isPS4();
    bool IsUnion = RT->isUnionType() && !UseClang11Compat;

    for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
         i != e; ++i, ++idx) {
      uint64_t Offset = OffsetBase + Layout.getFieldOffset(idx);
      bool BitField = i->isBitField();

      // Ignore padding bit-fields.
      if (BitField && i->isUnnamedBitfield())
        continue;

      // AMD64-ABI 3.2.3p2: Rule 1. If the size of an object is larger than
      // eight eightbytes, or it contains unaligned fields, it has class MEMORY.
      //
      // The only case a 256-bit or a 512-bit wide vector could be used is when
      // the struct contains a single 256-bit or 512-bit element. Early check
      // and fallback to memory.
      //
      // FIXME: Extend the Lo and Hi logic properly to work for sizes wider
      // than 128.
      if (Size > 128 &&
          ((!IsUnion && Size != getContext().getTypeSize(i->getType())) ||
           Size > getNativeVectorSizeForAVXABI(AVXLevel))) {
        Lo = Memory;
        postMerge(Size, Lo, Hi);
        return;
      }
      // Note, skip this test for bit-fields, see below.
      if (!BitField && Offset % getContext().getTypeAlign(i->getType())) {
        Lo = Memory;
        postMerge(Size, Lo, Hi);
        return;
      }

      // Classify this field.
      //
      // AMD64-ABI 3.2.3p2: Rule 3. If the size of the aggregate
      // exceeds a single eightbyte, each is classified
      // separately. Each eightbyte gets initialized to class
      // NO_CLASS.
      Class FieldLo, FieldHi;

      // Bit-fields require special handling, they do not force the
      // structure to be passed in memory even if unaligned, and
      // therefore they can straddle an eightbyte.
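      //
      // For example (illustrative): in
      //   struct __attribute__((packed)) S { char c; unsigned long long b : 60; };
      // 'b' occupies bits [8, 68) and straddles the boundary, so it marks
      // both eightbytes Integer and S is still passed in two GPRs.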
      if (BitField) {
        assert(!i->isUnnamedBitfield());
        uint64_t Offset = OffsetBase + Layout.getFieldOffset(idx);
        uint64_t Size = i->getBitWidthValue(getContext());

        uint64_t EB_Lo = Offset / 64;
        uint64_t EB_Hi = (Offset + Size - 1) / 64;

        if (EB_Lo) {
          assert(EB_Hi == EB_Lo && "Invalid classification, type > 16 bytes.");
          FieldLo = NoClass;
          FieldHi = Integer;
        } else {
          FieldLo = Integer;
          FieldHi = EB_Hi ? Integer : NoClass;
        }
      } else
        classify(i->getType(), Offset, FieldLo, FieldHi, isNamedArg);
      Lo = merge(Lo, FieldLo);
      Hi = merge(Hi, FieldHi);
      if (Lo == Memory || Hi == Memory)
        break;
    }

    postMerge(Size, Lo, Hi);
  }
}

ABIArgInfo X86_64ABIInfo::getIndirectReturnResult(QualType Ty) const {
  // If this is a scalar LLVM value then assume LLVM will pass it in the right
  // place naturally.
  if (!isAggregateTypeForABI(Ty)) {
    // Treat an enum type as its underlying type.
    if (const EnumType *EnumTy = Ty->getAs<EnumType>())
      Ty = EnumTy->getDecl()->getIntegerType();

    if (Ty->isExtIntType())
      return getNaturalAlignIndirect(Ty);

    return (isPromotableIntegerTypeForABI(Ty) ? ABIArgInfo::getExtend(Ty)
                                              : ABIArgInfo::getDirect());
  }

  return getNaturalAlignIndirect(Ty);
}

bool X86_64ABIInfo::IsIllegalVectorType(QualType Ty) const {
  if (const VectorType *VecTy = Ty->getAs<VectorType>()) {
    uint64_t Size = getContext().getTypeSize(VecTy);
    unsigned LargestVector = getNativeVectorSizeForAVXABI(AVXLevel);
    if (Size <= 64 || Size > LargestVector)
      return true;
    QualType EltTy = VecTy->getElementType();
    if (passInt128VectorsInMem() &&
        (EltTy->isSpecificBuiltinType(BuiltinType::Int128) ||
         EltTy->isSpecificBuiltinType(BuiltinType::UInt128)))
      return true;
  }

  return false;
}

ABIArgInfo X86_64ABIInfo::getIndirectResult(QualType Ty,
                                            unsigned freeIntRegs) const {
  // If this is a scalar LLVM value then assume LLVM will pass it in the right
  // place naturally.
  //
  // This assumption is optimistic, as there could be free registers available
  // when we need to pass this argument in memory, and LLVM could try to pass
  // the argument in the free register. This does not seem to happen currently,
  // but this code would be much safer if we could mark the argument with
  // 'onstack'. See PR12193.
  if (!isAggregateTypeForABI(Ty) && !IsIllegalVectorType(Ty) &&
      !Ty->isExtIntType()) {
    // Treat an enum type as its underlying type.
    if (const EnumType *EnumTy = Ty->getAs<EnumType>())
      Ty = EnumTy->getDecl()->getIntegerType();

    return (isPromotableIntegerTypeForABI(Ty) ? ABIArgInfo::getExtend(Ty)
                                              : ABIArgInfo::getDirect());
  }

  if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, getCXXABI()))
    return getNaturalAlignIndirect(Ty, RAA == CGCXXABI::RAA_DirectInMemory);

  // Compute the byval alignment. We specify the alignment of the byval in all
  // cases so that the mid-level optimizer knows the alignment of the byval.
  unsigned Align = std::max(getContext().getTypeAlign(Ty) / 8, 8U);

  // Attempt to avoid passing indirect results using byval when possible. This
  // is important for good codegen.
  //
  // We do this by coercing the value into a scalar type which the backend can
  // handle naturally (i.e., without using byval).
  //
  // For simplicity, we currently only do this when we have exhausted all of the
  // free integer registers. Doing this when there are free integer registers
  // would require more care, as we would have to ensure that the coerced value
  // did not claim the unused register. That would require either reordering the
  // arguments to the function (so that any subsequent inreg values came first),
  // or only doing this optimization when there were no following arguments that
  // might be inreg.
  //
  // We currently expect it to be rare (particularly in well written code) for
  // arguments to be passed on the stack when there are still free integer
  // registers available (this would typically imply large structs being passed
  // by value), so this seems like a fair tradeoff for now.
  //
  // We can revisit this if the backend grows support for 'onstack' parameter
  // attributes. See PR12193.
  if (freeIntRegs == 0) {
    uint64_t Size = getContext().getTypeSize(Ty);

    // If this type fits in an eightbyte, coerce it into the matching integral
    // type, which will end up on the stack (with alignment 8).
    if (Align == 8 && Size <= 64)
      return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(),
                                                          Size));
  }

  return ABIArgInfo::getIndirect(CharUnits::fromQuantity(Align));
}

/// The ABI specifies that a value should be passed in a full vector XMM/YMM
/// register. Pick an LLVM IR type that will be passed as a vector register.
llvm::Type *X86_64ABIInfo::GetByteVectorType(QualType Ty) const {
  // Wrapper structs/arrays that only contain vectors are passed just like
  // vectors; strip them off if present.
  if (const Type *InnerTy = isSingleElementStruct(Ty, getContext()))
    Ty = QualType(InnerTy, 0);

  llvm::Type *IRType = CGT.ConvertType(Ty);
  if (isa<llvm::VectorType>(IRType)) {
    // Don't pass vXi128 vectors in their native type, the backend can't
    // legalize them.
    if (passInt128VectorsInMem() &&
        cast<llvm::VectorType>(IRType)->getElementType()->isIntegerTy(128)) {
      // Use a vXi64 vector.
      uint64_t Size = getContext().getTypeSize(Ty);
      return llvm::FixedVectorType::get(llvm::Type::getInt64Ty(getVMContext()),
                                        Size / 64);
    }

    return IRType;
  }

  if (IRType->getTypeID() == llvm::Type::FP128TyID)
    return IRType;

  // We couldn't find the preferred IR vector type for 'Ty'.
  uint64_t Size = getContext().getTypeSize(Ty);
  assert((Size == 128 || Size == 256 || Size == 512) && "Invalid type found!");

  // Return a LLVM IR vector type based on the size of 'Ty'.
  return llvm::FixedVectorType::get(llvm::Type::getDoubleTy(getVMContext()),
                                    Size / 64);
}

/// BitsContainNoUserData - Return true if the specified [start,end) bit range
/// is known to either be off the end of the specified type or being in
/// alignment padding. The user type specified is known to be at most 128 bits
/// in size, and have passed through X86_64ABIInfo::classify with a successful
/// classification that put one of the two halves in the INTEGER class.
///
/// It is conservatively correct to return false.
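///
/// For example (illustrative): in struct { double d; int i; } the bit range
/// [96, 128) is tail padding, so the query returns true and the high
/// eightbyte can safely be represented as i32 rather than i64.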
static bool BitsContainNoUserData(QualType Ty, unsigned StartBit,
                                  unsigned EndBit, ASTContext &Context) {
  // If the bytes being queried are off the end of the type, there is no user
  // data hiding here. This handles analysis of builtins, vectors and other
  // types that don't contain interesting padding.
  unsigned TySize = (unsigned)Context.getTypeSize(Ty);
  if (TySize <= StartBit)
    return true;

  if (const ConstantArrayType *AT = Context.getAsConstantArrayType(Ty)) {
    unsigned EltSize = (unsigned)Context.getTypeSize(AT->getElementType());
    unsigned NumElts = (unsigned)AT->getSize().getZExtValue();

    // Check each element to see if the element overlaps with the queried range.
    for (unsigned i = 0; i != NumElts; ++i) {
      // If the element is after the span we care about, then we're done.
      unsigned EltOffset = i*EltSize;
      if (EltOffset >= EndBit) break;

      unsigned EltStart = EltOffset < StartBit ? StartBit-EltOffset : 0;
      if (!BitsContainNoUserData(AT->getElementType(), EltStart,
                                 EndBit-EltOffset, Context))
        return false;
    }
    // If it overlaps no elements, then it is safe to process as padding.
    return true;
  }

  if (const RecordType *RT = Ty->getAs<RecordType>()) {
    const RecordDecl *RD = RT->getDecl();
    const ASTRecordLayout &Layout = Context.getASTRecordLayout(RD);

    // If this is a C++ record, check the bases first.
    if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD)) {
      for (const auto &I : CXXRD->bases()) {
        assert(!I.isVirtual() && !I.getType()->isDependentType() &&
               "Unexpected base class!");
        const auto *Base =
            cast<CXXRecordDecl>(I.getType()->castAs<RecordType>()->getDecl());

        // If the base is after the span we care about, ignore it.
        unsigned BaseOffset = Context.toBits(Layout.getBaseClassOffset(Base));
        if (BaseOffset >= EndBit) continue;

        unsigned BaseStart = BaseOffset < StartBit ? StartBit-BaseOffset : 0;
        if (!BitsContainNoUserData(I.getType(), BaseStart,
                                   EndBit-BaseOffset, Context))
          return false;
      }
    }

    // Verify that no field has data that overlaps the region of interest. Yes
    // this could be sped up a lot by being smarter about queried fields,
    // however we're only looking at structs up to 16 bytes, so we don't care
    // much.
    unsigned idx = 0;
    for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
         i != e; ++i, ++idx) {
      unsigned FieldOffset = (unsigned)Layout.getFieldOffset(idx);

      // If we found a field after the region we care about, then we're done.
      if (FieldOffset >= EndBit) break;

      unsigned FieldStart = FieldOffset < StartBit ? StartBit-FieldOffset : 0;
      if (!BitsContainNoUserData(i->getType(), FieldStart, EndBit-FieldOffset,
                                 Context))
        return false;
    }

    // If nothing in this record overlapped the area of interest, then we're
    // clean.
    return true;
  }

  return false;
}

/// ContainsFloatAtOffset - Return true if the specified LLVM IR type has a
/// float member at the specified offset. For example, {int,{float}} has a
/// float at offset 4. It is conservatively correct for this routine to return
/// false.
static bool ContainsFloatAtOffset(llvm::Type *IRType, unsigned IROffset,
                                  const llvm::DataLayout &TD) {
  // Base case if we find a float.
  if (IROffset == 0 && IRType->isFloatTy())
    return true;

  // If this is a struct, recurse into the field at the specified offset.
  if (llvm::StructType *STy = dyn_cast<llvm::StructType>(IRType)) {
    const llvm::StructLayout *SL = TD.getStructLayout(STy);
    unsigned Elt = SL->getElementContainingOffset(IROffset);
    IROffset -= SL->getElementOffset(Elt);
    return ContainsFloatAtOffset(STy->getElementType(Elt), IROffset, TD);
  }

  // If this is an array, recurse into the field at the specified offset.
  if (llvm::ArrayType *ATy = dyn_cast<llvm::ArrayType>(IRType)) {
    llvm::Type *EltTy = ATy->getElementType();
    unsigned EltSize = TD.getTypeAllocSize(EltTy);
    IROffset -= IROffset/EltSize*EltSize;
    return ContainsFloatAtOffset(EltTy, IROffset, TD);
  }

  return false;
}

/// GetSSETypeAtOffset - Return a type that will be passed by the backend in the
/// low 8 bytes of an XMM register, corresponding to the SSE class.
llvm::Type *X86_64ABIInfo::
GetSSETypeAtOffset(llvm::Type *IRType, unsigned IROffset,
                   QualType SourceTy, unsigned SourceOffset) const {
  // The only three choices we have are double, <2 x float>, or float. We
  // pass as float if the last 4 bytes are just padding. This happens for
  // structs that contain 3 floats.
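  //
  // For example (illustrative): struct { float a, b, c; } is passed as
  // { <2 x float>, float }; eightbyte 0 carries a and b, while the second
  // eightbyte degrades to float because bits [96, 128) are off the end of
  // the 12-byte struct.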
  if (BitsContainNoUserData(SourceTy, SourceOffset*8+32,
                            SourceOffset*8+64, getContext()))
    return llvm::Type::getFloatTy(getVMContext());

  // We want to pass as <2 x float> if the LLVM IR type contains a float at
  // offset+0 and offset+4. Walk the LLVM IR type to find out if this is the
  // case.
  if (ContainsFloatAtOffset(IRType, IROffset, getDataLayout()) &&
      ContainsFloatAtOffset(IRType, IROffset+4, getDataLayout()))
    return llvm::FixedVectorType::get(llvm::Type::getFloatTy(getVMContext()),
                                      2);

  return llvm::Type::getDoubleTy(getVMContext());
}

/// GetINTEGERTypeAtOffset - The ABI specifies that a value should be passed in
/// an 8-byte GPR. This means that we either have a scalar or we are talking
/// about the high or low part of an up-to-16-byte struct. This routine picks
/// the best LLVM IR type to represent this, which may be i64 or may be anything
/// else that the backend will pass in a GPR that works better (e.g. i8, %foo*,
/// etc).
///
/// PrefType is an LLVM IR type that corresponds to (part of) the IR type for
/// the source type. IROffset is an offset in bytes into the LLVM IR type that
/// the 8-byte value references. PrefType may be null.
///
/// SourceTy is the source-level type for the entire argument. SourceOffset is
/// an offset into this that we're processing (which is always either 0 or 8).
///
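/// For example (illustrative): the high eightbyte of
/// struct { long x; char c; } is represented as i8, since everything past
/// bit 72 of the 16-byte struct is tail padding.
///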
llvm::Type *X86_64ABIInfo::
GetINTEGERTypeAtOffset(llvm::Type *IRType, unsigned IROffset,
                       QualType SourceTy, unsigned SourceOffset) const {
  // If we're dealing with an un-offset LLVM IR type, then it means that we're
  // returning an 8-byte unit starting with it. See if we can safely use it.
  if (IROffset == 0) {
    // Pointers and int64's always fill the 8-byte unit.
    if ((isa<llvm::PointerType>(IRType) && Has64BitPointers) ||
        IRType->isIntegerTy(64))
      return IRType;

    // If we have a 1/2/4-byte integer, we can use it only if the rest of the
    // goodness in the source type is just tail padding. This is allowed to
    // kick in for struct {double,int} on the int, but not on
    // struct{double,int,int} because we wouldn't return the second int. We
    // have to do this analysis on the source type because we can't depend on
    // unions being lowered a specific way etc.
    if (IRType->isIntegerTy(8) || IRType->isIntegerTy(16) ||
        IRType->isIntegerTy(32) ||
        (isa<llvm::PointerType>(IRType) && !Has64BitPointers)) {
      unsigned BitWidth = isa<llvm::PointerType>(IRType) ? 32 :
          cast<llvm::IntegerType>(IRType)->getBitWidth();

      if (BitsContainNoUserData(SourceTy, SourceOffset*8+BitWidth,
                                SourceOffset*8+64, getContext()))
        return IRType;
    }
  }

  if (llvm::StructType *STy = dyn_cast<llvm::StructType>(IRType)) {
    // If this is a struct, recurse into the field at the specified offset.
    const llvm::StructLayout *SL = getDataLayout().getStructLayout(STy);
    if (IROffset < SL->getSizeInBytes()) {
      unsigned FieldIdx = SL->getElementContainingOffset(IROffset);
      IROffset -= SL->getElementOffset(FieldIdx);

      return GetINTEGERTypeAtOffset(STy->getElementType(FieldIdx), IROffset,
                                    SourceTy, SourceOffset);
    }
  }

  if (llvm::ArrayType *ATy = dyn_cast<llvm::ArrayType>(IRType)) {
    llvm::Type *EltTy = ATy->getElementType();
    unsigned EltSize = getDataLayout().getTypeAllocSize(EltTy);
    unsigned EltOffset = IROffset/EltSize*EltSize;
    return GetINTEGERTypeAtOffset(EltTy, IROffset-EltOffset, SourceTy,
                                  SourceOffset);
  }

  // Okay, we don't have any better idea of what to pass, so we pass this in an
  // integer register that isn't too big to fit the rest of the struct.
  unsigned TySizeInBytes =
      (unsigned)getContext().getTypeSizeInChars(SourceTy).getQuantity();

  assert(TySizeInBytes != SourceOffset && "Empty field?");

  // It is always safe to classify this as an integer type up to i64 that
  // isn't larger than the structure.
  return llvm::IntegerType::get(getVMContext(),
                                std::min(TySizeInBytes-SourceOffset, 8U)*8);
}

/// GetX86_64ByValArgumentPair - Given a high and low type that can ideally
/// be used as elements of a two register pair to pass or return, return a
/// first class aggregate to represent them. For example, if the low part of
/// a by-value argument should be passed as i32* and the high part as float,
/// return {i32*, float}.
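///
/// A sketch of the fixup below (illustrative, not from the ABI document): if
/// the low part were inferred as float and the high part as i32, the naive
/// {float, i32} would place the high part at offset 4, so the float is
/// promoted to double and the pair becomes {double, i32} with the i32 at
/// offset 8.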
static llvm::Type *
GetX86_64ByValArgumentPair(llvm::Type *Lo, llvm::Type *Hi,
                           const llvm::DataLayout &TD) {
  // In order to correctly satisfy the ABI, we need the high part to start
  // at offset 8. If the high and low parts we inferred are both 4-byte types
  // (e.g. i32 and i32) then the resultant struct type ({i32,i32}) won't have
  // the second element at offset 8. Check for this:
  unsigned LoSize = (unsigned)TD.getTypeAllocSize(Lo);
  unsigned HiAlign = TD.getABITypeAlignment(Hi);
  unsigned HiStart = llvm::alignTo(LoSize, HiAlign);
  assert(HiStart != 0 && HiStart <= 8 && "Invalid x86-64 argument pair!");

  // To handle this, we have to increase the size of the low part so that the
  // second element will start at an 8 byte offset. We can't increase the size
  // of the second element because it might make us access off the end of the
  // struct.
  if (HiStart != 8) {
    // There are usually two sorts of types the ABI generation code can produce
    // for the low part of a pair that aren't 8 bytes in size: float or
    // i8/i16/i32. This can also include pointers when they are 32-bit (X32 and
    // NaCl).
    // Promote these to a larger type.
    if (Lo->isFloatTy())
      Lo = llvm::Type::getDoubleTy(Lo->getContext());
    else {
      assert((Lo->isIntegerTy() || Lo->isPointerTy())
             && "Invalid/unknown lo type");
      Lo = llvm::Type::getInt64Ty(Lo->getContext());
    }
  }

  llvm::StructType *Result = llvm::StructType::get(Lo, Hi);

  // Verify that the second element is at an 8-byte offset.
  assert(TD.getStructLayout(Result)->getElementOffset(1) == 8 &&
         "Invalid x86-64 argument pair!");
  return Result;
}

ABIArgInfo X86_64ABIInfo::
classifyReturnType(QualType RetTy) const {
  // AMD64-ABI 3.2.3p4: Rule 1. Classify the return type with the
  // classification algorithm.
  X86_64ABIInfo::Class Lo, Hi;
  classify(RetTy, 0, Lo, Hi, /*isNamedArg*/ true);

  // Check some invariants.
  assert((Hi != Memory || Lo == Memory) && "Invalid memory classification.");
  assert((Hi != SSEUp || Lo == SSE) && "Invalid SSEUp classification.");

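  // For example (illustrative): struct { long x; double y; } classifies as
  // Lo = Integer, Hi = SSE and is returned as { i64, double } in
  // %rax and %xmm0.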
  llvm::Type *ResType = nullptr;
  switch (Lo) {
  case NoClass:
    if (Hi == NoClass)
      return ABIArgInfo::getIgnore();
    // If the low part is just padding, it takes no register, leave ResType
    // null.
    assert((Hi == SSE || Hi == Integer || Hi == X87Up) &&
           "Unknown missing lo part");
    break;

  case SSEUp:
  case X87Up:
    llvm_unreachable("Invalid classification for lo word.");

    // AMD64-ABI 3.2.3p4: Rule 2. Types of class memory are returned via
    // hidden argument.
  case Memory:
    return getIndirectReturnResult(RetTy);

    // AMD64-ABI 3.2.3p4: Rule 3. If the class is INTEGER, the next
    // available register of the sequence %rax, %rdx is used.
  case Integer:
    ResType = GetINTEGERTypeAtOffset(CGT.ConvertType(RetTy), 0, RetTy, 0);

    // If we have a sign or zero extended integer, make sure to return Extend
    // so that the parameter gets the right LLVM IR attributes.
    if (Hi == NoClass && isa<llvm::IntegerType>(ResType)) {
      // Treat an enum type as its underlying type.
      if (const EnumType *EnumTy = RetTy->getAs<EnumType>())
        RetTy = EnumTy->getDecl()->getIntegerType();

      if (RetTy->isIntegralOrEnumerationType() &&
          isPromotableIntegerTypeForABI(RetTy))
        return ABIArgInfo::getExtend(RetTy);
    }
    break;

    // AMD64-ABI 3.2.3p4: Rule 4. If the class is SSE, the next
    // available SSE register of the sequence %xmm0, %xmm1 is used.
  case SSE:
    ResType = GetSSETypeAtOffset(CGT.ConvertType(RetTy), 0, RetTy, 0);
    break;

    // AMD64-ABI 3.2.3p4: Rule 6. If the class is X87, the value is
    // returned on the X87 stack in %st0 as 80-bit x87 number.
  case X87:
    ResType = llvm::Type::getX86_FP80Ty(getVMContext());
    break;

    // AMD64-ABI 3.2.3p4: Rule 8. If the class is COMPLEX_X87, the real
    // part of the value is returned in %st0 and the imaginary part in
    // %st1.
  case ComplexX87:
    assert(Hi == ComplexX87 && "Unexpected ComplexX87 classification.");
    ResType = llvm::StructType::get(llvm::Type::getX86_FP80Ty(getVMContext()),
                                    llvm::Type::getX86_FP80Ty(getVMContext()));
    break;
  }

  llvm::Type *HighPart = nullptr;
  switch (Hi) {
    // Memory was handled previously and X87 should
    // never occur as a hi class.
  case Memory:
  case X87:
    llvm_unreachable("Invalid classification for hi word.");

  case ComplexX87: // Previously handled.
  case NoClass:
    break;

  case Integer:
    HighPart = GetINTEGERTypeAtOffset(CGT.ConvertType(RetTy), 8, RetTy, 8);
    if (Lo == NoClass) // Return HighPart at offset 8 in memory.
      return ABIArgInfo::getDirect(HighPart, 8);
    break;
  case SSE:
    HighPart = GetSSETypeAtOffset(CGT.ConvertType(RetTy), 8, RetTy, 8);
    if (Lo == NoClass) // Return HighPart at offset 8 in memory.
      return ABIArgInfo::getDirect(HighPart, 8);
    break;

    // AMD64-ABI 3.2.3p4: Rule 5. If the class is SSEUP, the eightbyte
    // is passed in the next available eightbyte chunk of the last used
    // vector register.
    //
    // SSEUP should always be preceded by SSE, just widen.
  case SSEUp:
    assert(Lo == SSE && "Unexpected SSEUp classification.");
    ResType = GetByteVectorType(RetTy);
    break;

    // AMD64-ABI 3.2.3p4: Rule 7. If the class is X87UP, the value is
    // returned together with the previous X87 value in %st0.
  case X87Up:
    // If X87Up is preceded by X87, we don't need to do
    // anything. However, in some cases with unions it may not be
    // preceded by X87. In such situations we follow gcc and pass the
    // extra bits in an SSE reg.
    if (Lo != X87) {
      HighPart = GetSSETypeAtOffset(CGT.ConvertType(RetTy), 8, RetTy, 8);
      if (Lo == NoClass) // Return HighPart at offset 8 in memory.
        return ABIArgInfo::getDirect(HighPart, 8);
    }
    break;
  }

  // If a high part was specified, merge it together with the low part. It is
  // known to pass in the high eightbyte of the result. We do this by forming a
  // first class struct aggregate with the high and low part: {low, high}
  if (HighPart)
    ResType = GetX86_64ByValArgumentPair(ResType, HighPart, getDataLayout());

  return ABIArgInfo::getDirect(ResType);
}

ABIArgInfo X86_64ABIInfo::classifyArgumentType(
    QualType Ty, unsigned freeIntRegs, unsigned &neededInt, unsigned &neededSSE,
    bool isNamedArg) const {
  Ty = useFirstFieldIfTransparentUnion(Ty);

  X86_64ABIInfo::Class Lo, Hi;
  classify(Ty, 0, Lo, Hi, isNamedArg);

  // Check some invariants.
  // FIXME: Enforce these by construction.
  assert((Hi != Memory || Lo == Memory) && "Invalid memory classification.");
  assert((Hi != SSEUp || Lo == SSE) && "Invalid SSEUp classification.");

  neededInt = 0;
  neededSSE = 0;
  llvm::Type *ResType = nullptr;
  switch (Lo) {
  case NoClass:
    if (Hi == NoClass)
      return ABIArgInfo::getIgnore();
    // If the low part is just padding, it takes no register, leave ResType
    // null.
    assert((Hi == SSE || Hi == Integer || Hi == X87Up) &&
           "Unknown missing lo part");
    break;

    // AMD64-ABI 3.2.3p3: Rule 1. If the class is MEMORY, pass the argument
    // on the stack.
  case Memory:

    // AMD64-ABI 3.2.3p3: Rule 5. If the class is X87, X87UP or
    // COMPLEX_X87, it is passed in memory.
  case X87:
  case ComplexX87:
    if (getRecordArgABI(Ty, getCXXABI()) == CGCXXABI::RAA_Indirect)
      ++neededInt;
    return getIndirectResult(Ty, freeIntRegs);

  case SSEUp:
  case X87Up:
    llvm_unreachable("Invalid classification for lo word.");

    // AMD64-ABI 3.2.3p3: Rule 2. If the class is INTEGER, the next
    // available register of the sequence %rdi, %rsi, %rdx, %rcx, %r8
    // and %r9 is used.
  case Integer:
    ++neededInt;

    // Pick an 8-byte type based on the preferred type.
    ResType = GetINTEGERTypeAtOffset(CGT.ConvertType(Ty), 0, Ty, 0);

    // If we have a sign or zero extended integer, make sure to return Extend
    // so that the parameter gets the right LLVM IR attributes.
    if (Hi == NoClass && isa<llvm::IntegerType>(ResType)) {
      // Treat an enum type as its underlying type.
      if (const EnumType *EnumTy = Ty->getAs<EnumType>())
        Ty = EnumTy->getDecl()->getIntegerType();

      if (Ty->isIntegralOrEnumerationType() &&
          isPromotableIntegerTypeForABI(Ty))
        return ABIArgInfo::getExtend(Ty);
    }

    break;

    // AMD64-ABI 3.2.3p3: Rule 3. If the class is SSE, the next
    // available SSE register is used, the registers are taken in the
    // order from %xmm0 to %xmm7.
  case SSE: {
    llvm::Type *IRType = CGT.ConvertType(Ty);
    ResType = GetSSETypeAtOffset(IRType, 0, Ty, 0);
    ++neededSSE;
    break;
  }
  }

  llvm::Type *HighPart = nullptr;
  switch (Hi) {
    // Memory was handled previously, ComplexX87 and X87 should
    // never occur as hi classes, and X87Up must be preceded by X87,
    // which is passed in memory.
  case Memory:
  case X87:
  case ComplexX87:
    llvm_unreachable("Invalid classification for hi word.");

  case NoClass: break;

  case Integer:
    ++neededInt;
    // Pick an 8-byte type based on the preferred type.
    HighPart = GetINTEGERTypeAtOffset(CGT.ConvertType(Ty), 8, Ty, 8);

    if (Lo == NoClass) // Pass HighPart at offset 8 in memory.
      return ABIArgInfo::getDirect(HighPart, 8);
    break;

    // X87Up generally doesn't occur here (long double is passed in
    // memory), except in situations involving unions.
  case X87Up:
  case SSE:
    HighPart = GetSSETypeAtOffset(CGT.ConvertType(Ty), 8, Ty, 8);

    if (Lo == NoClass) // Pass HighPart at offset 8 in memory.
      return ABIArgInfo::getDirect(HighPart, 8);

    ++neededSSE;
    break;

    // AMD64-ABI 3.2.3p3: Rule 4. If the class is SSEUP, the
    // eightbyte is passed in the upper half of the last used SSE
    // register. This only happens when 128-bit vectors are passed.
  case SSEUp:
    assert(Lo == SSE && "Unexpected SSEUp classification");
    ResType = GetByteVectorType(Ty);
    break;
  }

  // If a high part was specified, merge it together with the low part. It is
  // known to pass in the high eightbyte of the result. We do this by forming a
  // first class struct aggregate with the high and low part: {low, high}
  if (HighPart)
    ResType = GetX86_64ByValArgumentPair(ResType, HighPart, getDataLayout());

  return ABIArgInfo::getDirect(ResType);
}

ABIArgInfo
X86_64ABIInfo::classifyRegCallStructTypeImpl(QualType Ty, unsigned &NeededInt,
                                             unsigned &NeededSSE) const {
  auto RT = Ty->getAs<RecordType>();
  assert(RT && "classifyRegCallStructType only valid with struct types");

  if (RT->getDecl()->hasFlexibleArrayMember())
    return getIndirectReturnResult(Ty);

  // Sum up bases
  if (auto CXXRD = dyn_cast<CXXRecordDecl>(RT->getDecl())) {
    if (CXXRD->isDynamicClass()) {
      NeededInt = NeededSSE = 0;
      return getIndirectReturnResult(Ty);
    }

    for (const auto &I : CXXRD->bases())
      if (classifyRegCallStructTypeImpl(I.getType(), NeededInt, NeededSSE)
              .isIndirect()) {
        NeededInt = NeededSSE = 0;
        return getIndirectReturnResult(Ty);
      }
  }

  // Sum up members
  for (const auto *FD : RT->getDecl()->fields()) {
    if (FD->getType()->isRecordType() && !FD->getType()->isUnionType()) {
      if (classifyRegCallStructTypeImpl(FD->getType(), NeededInt, NeededSSE)
              .isIndirect()) {
        NeededInt = NeededSSE = 0;
        return getIndirectReturnResult(Ty);
      }
    } else {
      unsigned LocalNeededInt, LocalNeededSSE;
      if (classifyArgumentType(FD->getType(), UINT_MAX, LocalNeededInt,
                               LocalNeededSSE, true)
              .isIndirect()) {
        NeededInt = NeededSSE = 0;
        return getIndirectReturnResult(Ty);
      }
      NeededInt += LocalNeededInt;
      NeededSSE += LocalNeededSSE;
    }
  }

  return ABIArgInfo::getDirect();
}

ABIArgInfo X86_64ABIInfo::classifyRegCallStructType(QualType Ty,
                                                    unsigned &NeededInt,
                                                    unsigned &NeededSSE) const {

  NeededInt = 0;
  NeededSSE = 0;

  return classifyRegCallStructTypeImpl(Ty, NeededInt, NeededSSE);
}

void X86_64ABIInfo::computeInfo(CGFunctionInfo &FI) const {

  const unsigned CallingConv = FI.getCallingConvention();
  // It is possible to force Win64 calling convention on any x86_64 target by
  // using __attribute__((ms_abi)). In such case to correctly emit Win64
  // compatible code delegate this call to WinX86_64ABIInfo::computeInfo.
  if (CallingConv == llvm::CallingConv::Win64) {
    WinX86_64ABIInfo Win64ABIInfo(CGT, AVXLevel);
    Win64ABIInfo.computeInfo(FI);
    return;
  }

  bool IsRegCall = CallingConv == llvm::CallingConv::X86_RegCall;

  // Keep track of the number of assigned registers.
  unsigned FreeIntRegs = IsRegCall ? 11 : 6;
  unsigned FreeSSERegs = IsRegCall ? 16 : 8;
  unsigned NeededInt, NeededSSE;

  if (!::classifyReturnType(getCXXABI(), FI, *this)) {
    if (IsRegCall && FI.getReturnType()->getTypePtr()->isRecordType() &&
        !FI.getReturnType()->getTypePtr()->isUnionType()) {
      FI.getReturnInfo() =
          classifyRegCallStructType(FI.getReturnType(), NeededInt, NeededSSE);
      if (FreeIntRegs >= NeededInt && FreeSSERegs >= NeededSSE) {
        FreeIntRegs -= NeededInt;
        FreeSSERegs -= NeededSSE;
      } else {
        FI.getReturnInfo() = getIndirectReturnResult(FI.getReturnType());
      }
    } else if (IsRegCall && FI.getReturnType()->getAs<ComplexType>() &&
               getContext().getCanonicalType(FI.getReturnType()
                                                 ->getAs<ComplexType>()
                                                 ->getElementType()) ==
                   getContext().LongDoubleTy)
      // Complex Long Double Type is passed in Memory when Regcall
      // calling convention is used.
      FI.getReturnInfo() = getIndirectReturnResult(FI.getReturnType());
    else
      FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
  }

  // If the return value is indirect, then the hidden argument is consuming one
  // integer register.
  if (FI.getReturnInfo().isIndirect())
    --FreeIntRegs;

  // The chain argument effectively gives us another free register.
  if (FI.isChainCall())
    ++FreeIntRegs;

  unsigned NumRequiredArgs = FI.getNumRequiredArgs();
  // AMD64-ABI 3.2.3p3: Once arguments are classified, the registers
  // get assigned (in left-to-right order) for passing as follows...
  unsigned ArgNo = 0;
  for (CGFunctionInfo::arg_iterator it = FI.arg_begin(), ie = FI.arg_end();
       it != ie; ++it, ++ArgNo) {
    bool IsNamedArg = ArgNo < NumRequiredArgs;

    if (IsRegCall && it->type->isStructureOrClassType())
      it->info = classifyRegCallStructType(it->type, NeededInt, NeededSSE);
    else
      it->info = classifyArgumentType(it->type, FreeIntRegs, NeededInt,
                                      NeededSSE, IsNamedArg);

    // AMD64-ABI 3.2.3p3: If there are no registers available for any
    // eightbyte of an argument, the whole argument is passed on the
    // stack. If registers have already been assigned for some
    // eightbytes of such an argument, the assignments get reverted.
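    //
    // For example (illustrative): if only one integer register remains and
    // the next argument is struct { long a, b; } (which needs two), both
    // eightbytes go to the stack and the remaining register stays available
    // for later arguments.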
    if (FreeIntRegs >= NeededInt && FreeSSERegs >= NeededSSE) {
      FreeIntRegs -= NeededInt;
      FreeSSERegs -= NeededSSE;
    } else {
      it->info = getIndirectResult(it->type, FreeIntRegs);
    }
  }
}

static Address EmitX86_64VAArgFromMemory(CodeGenFunction &CGF,
                                         Address VAListAddr, QualType Ty) {
  Address overflow_arg_area_p =
      CGF.Builder.CreateStructGEP(VAListAddr, 2, "overflow_arg_area_p");
  llvm::Value *overflow_arg_area =
      CGF.Builder.CreateLoad(overflow_arg_area_p, "overflow_arg_area");

  // AMD64-ABI 3.5.7p5: Step 7. Align l->overflow_arg_area upwards to a 16
  // byte boundary if alignment needed by type exceeds 8 byte boundary.
  // It isn't stated explicitly in the standard, but in practice we use
  // alignment greater than 16 where necessary.
  CharUnits Align = CGF.getContext().getTypeAlignInChars(Ty);
  if (Align > CharUnits::fromQuantity(8)) {
    overflow_arg_area = emitRoundPointerUpToAlignment(CGF, overflow_arg_area,
                                                      Align);
  }

  // AMD64-ABI 3.5.7p5: Step 8. Fetch type from l->overflow_arg_area.
  llvm::Type *LTy = CGF.ConvertTypeForMem(Ty);
  llvm::Value *Res =
      CGF.Builder.CreateBitCast(overflow_arg_area,
                                llvm::PointerType::getUnqual(LTy));

  // AMD64-ABI 3.5.7p5: Step 9. Set l->overflow_arg_area to:
  // l->overflow_arg_area + sizeof(type).
  // AMD64-ABI 3.5.7p5: Step 10. Align l->overflow_arg_area upwards to
  // an 8 byte boundary.

  uint64_t SizeInBytes = (CGF.getContext().getTypeSize(Ty) + 7) / 8;
  llvm::Value *Offset =
      llvm::ConstantInt::get(CGF.Int32Ty, (SizeInBytes + 7) & ~7);
  overflow_arg_area = CGF.Builder.CreateGEP(overflow_arg_area, Offset,
                                            "overflow_arg_area.next");
  CGF.Builder.CreateStore(overflow_arg_area, overflow_arg_area_p);

  // AMD64-ABI 3.5.7p5: Step 11. Return the fetched type.
  return Address(Res, Align);
}

Address X86_64ABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
                                 QualType Ty) const {
  // Assume that va_list type is correct; should be pointer to LLVM type:
  // struct {
  //   i32 gp_offset;
  //   i32 fp_offset;
  //   i8* overflow_arg_area;
  //   i8* reg_save_area;
  // };
  unsigned neededInt, neededSSE;

  Ty = getContext().getCanonicalType(Ty);
  ABIArgInfo AI = classifyArgumentType(Ty, 0, neededInt, neededSSE,
                                       /*isNamedArg*/false);

  // AMD64-ABI 3.5.7p5: Step 1. Determine whether type may be passed
  // in the registers. If not go to step 7.
  if (!neededInt && !neededSSE)
    return EmitX86_64VAArgFromMemory(CGF, VAListAddr, Ty);

  // AMD64-ABI 3.5.7p5: Step 2. Compute num_gp to hold the number of
  // general purpose registers needed to pass type and num_fp to hold
  // the number of floating point registers needed.

  // AMD64-ABI 3.5.7p5: Step 3. Verify whether arguments fit into
  // registers. In the case: l->gp_offset > 48 - num_gp * 8 or
  // l->fp_offset > 304 - num_fp * 16 go to step 7.
  //
  // NOTE: 304 is a typo; there are (6 * 8 + 8 * 16) = 176 bytes of
  // register save space.

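  // Roughly (an illustrative C-level sketch of the branch emitted below,
  // assuming one GPR and one SSE register are needed):
  //   if (l->gp_offset <= 48 - 8 && l->fp_offset <= 176 - 16)
  //     ... load from l->reg_save_area ...
  //   else
  //     ... load from l->overflow_arg_area ...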
3997 llvm::Value *InRegs = nullptr;
3998 Address gp_offset_p = Address::invalid(), fp_offset_p = Address::invalid();
3999 llvm::Value *gp_offset = nullptr, *fp_offset = nullptr;
4000 if (neededInt) {
4001 gp_offset_p = CGF.Builder.CreateStructGEP(VAListAddr, 0, "gp_offset_p");
4002 gp_offset = CGF.Builder.CreateLoad(gp_offset_p, "gp_offset");
4003 InRegs = llvm::ConstantInt::get(CGF.Int32Ty, 48 - neededInt * 8);
4004 InRegs = CGF.Builder.CreateICmpULE(gp_offset, InRegs, "fits_in_gp");
4005 }
4006
4007 if (neededSSE) {
4008 fp_offset_p = CGF.Builder.CreateStructGEP(VAListAddr, 1, "fp_offset_p");
4009 fp_offset = CGF.Builder.CreateLoad(fp_offset_p, "fp_offset");
4010 llvm::Value *FitsInFP =
4011 llvm::ConstantInt::get(CGF.Int32Ty, 176 - neededSSE * 16);
4012 FitsInFP = CGF.Builder.CreateICmpULE(fp_offset, FitsInFP, "fits_in_fp");
4013 InRegs = InRegs ? CGF.Builder.CreateAnd(InRegs, FitsInFP) : FitsInFP;
4014 }
4015
4016 llvm::BasicBlock *InRegBlock = CGF.createBasicBlock("vaarg.in_reg");
4017 llvm::BasicBlock *InMemBlock = CGF.createBasicBlock("vaarg.in_mem");
4018 llvm::BasicBlock *ContBlock = CGF.createBasicBlock("vaarg.end");
4019 CGF.Builder.CreateCondBr(InRegs, InRegBlock, InMemBlock);
4020
4021 // Emit code to load the value if it was passed in registers.
4022
4023 CGF.EmitBlock(InRegBlock);
4024
4025 // AMD64-ABI 3.5.7p5: Step 4. Fetch type from l->reg_save_area with
4026 // an offset of l->gp_offset and/or l->fp_offset. This may require
4027 // copying to a temporary location in case the parameter is passed
4028 // in different register classes or requires an alignment greater
4029 // than 8 for general purpose registers and 16 for XMM registers.
4030 //
4031 // FIXME: This really results in shameful code when we end up needing to
4032 // collect arguments from different places; often what should result in a
4033 // simple assembling of a structure from scattered addresses has many more
4034 // loads than necessary. Can we clean this up?
4035 llvm::Type *LTy = CGF.ConvertTypeForMem(Ty);
4036 llvm::Value *RegSaveArea = CGF.Builder.CreateLoad(
4037 CGF.Builder.CreateStructGEP(VAListAddr, 3), "reg_save_area");
4038
4039 Address RegAddr = Address::invalid();
4040 if (neededInt && neededSSE) {
4041 // FIXME: Cleanup.
4042 assert(AI.isDirect() && "Unexpected ABI info for mixed regs");
4043 llvm::StructType *ST = cast<llvm::StructType>(AI.getCoerceToType());
4044 Address Tmp = CGF.CreateMemTemp(Ty);
4045 Tmp = CGF.Builder.CreateElementBitCast(Tmp, ST);
4046 assert(ST->getNumElements() == 2 && "Unexpected ABI info for mixed regs");
4047 llvm::Type *TyLo = ST->getElementType(0);
4048 llvm::Type *TyHi = ST->getElementType(1);
4049 assert((TyLo->isFPOrFPVectorTy() ^ TyHi->isFPOrFPVectorTy()) &&
4050 "Unexpected ABI info for mixed regs");
4051 llvm::Type *PTyLo = llvm::PointerType::getUnqual(TyLo);
4052 llvm::Type *PTyHi = llvm::PointerType::getUnqual(TyHi);
4053 llvm::Value *GPAddr = CGF.Builder.CreateGEP(RegSaveArea, gp_offset);
4054 llvm::Value *FPAddr = CGF.Builder.CreateGEP(RegSaveArea, fp_offset);
4055 llvm::Value *RegLoAddr = TyLo->isFPOrFPVectorTy() ? FPAddr : GPAddr;
4056 llvm::Value *RegHiAddr = TyLo->isFPOrFPVectorTy() ? GPAddr : FPAddr;
4057
4058 // Copy the first element.
4059 // FIXME: Our choice of alignment here and below is probably pessimistic.
4060 llvm::Value *V = CGF.Builder.CreateAlignedLoad(
4061 TyLo, CGF.Builder.CreateBitCast(RegLoAddr, PTyLo),
4062 CharUnits::fromQuantity(getDataLayout().getABITypeAlignment(TyLo)));
4063 CGF.Builder.CreateStore(V, CGF.Builder.CreateStructGEP(Tmp, 0));
4064
4065 // Copy the second element.
4066 V = CGF.Builder.CreateAlignedLoad(
4067 TyHi, CGF.Builder.CreateBitCast(RegHiAddr, PTyHi),
4068 CharUnits::fromQuantity(getDataLayout().getABITypeAlignment(TyHi)));
4069 CGF.Builder.CreateStore(V, CGF.Builder.CreateStructGEP(Tmp, 1));
4070
4071 RegAddr = CGF.Builder.CreateElementBitCast(Tmp, LTy);
4072 } else if (neededInt) {
4073 RegAddr = Address(CGF.Builder.CreateGEP(RegSaveArea, gp_offset),
4074 CharUnits::fromQuantity(8));
4075 RegAddr = CGF.Builder.CreateElementBitCast(RegAddr, LTy);
4076
4077 // Copy to a temporary if necessary to ensure the appropriate alignment.
4078 auto TInfo = getContext().getTypeInfoInChars(Ty);
4079 uint64_t TySize = TInfo.Width.getQuantity();
4080 CharUnits TyAlign = TInfo.Align;
4081
4082 // Copy into a temporary if the type is more aligned than the
4083 // register save area.
4084 if (TyAlign.getQuantity() > 8) {
4085 Address Tmp = CGF.CreateMemTemp(Ty);
4086 CGF.Builder.CreateMemCpy(Tmp, RegAddr, TySize, false);
4087 RegAddr = Tmp;
4088 }
4089
4090 } else if (neededSSE == 1) {
4091 RegAddr = Address(CGF.Builder.CreateGEP(RegSaveArea, fp_offset),
4092 CharUnits::fromQuantity(16));
4093 RegAddr = CGF.Builder.CreateElementBitCast(RegAddr, LTy);
4094 } else {
4095 assert(neededSSE == 2 && "Invalid number of needed registers!");
4096 // SSE registers are spaced 16 bytes apart in the register save
4097 // area, we need to collect the two eightbytes together.
4098 // The ABI isn't explicit about this, but it seems reasonable
4099 // to assume that the slots are 16-byte aligned, since the stack is
4100 // naturally 16-byte aligned and the prologue is expected to store
4101 // all the SSE registers to the RSA.
4102 Address RegAddrLo = Address(CGF.Builder.CreateGEP(RegSaveArea, fp_offset),
4103 CharUnits::fromQuantity(16));
4104 Address RegAddrHi =
4105 CGF.Builder.CreateConstInBoundsByteGEP(RegAddrLo,
4106 CharUnits::fromQuantity(16));
4107 llvm::Type *ST = AI.canHaveCoerceToType()
4108 ? AI.getCoerceToType()
4109 : llvm::StructType::get(CGF.DoubleTy, CGF.DoubleTy);
4110 llvm::Value *V;
4111 Address Tmp = CGF.CreateMemTemp(Ty);
4112 Tmp = CGF.Builder.CreateElementBitCast(Tmp, ST);
4113 V = CGF.Builder.CreateLoad(CGF.Builder.CreateElementBitCast(
4114 RegAddrLo, ST->getStructElementType(0)));
4115 CGF.Builder.CreateStore(V, CGF.Builder.CreateStructGEP(Tmp, 0));
4116 V = CGF.Builder.CreateLoad(CGF.Builder.CreateElementBitCast(
4117 RegAddrHi, ST->getStructElementType(1)));
4118 CGF.Builder.CreateStore(V, CGF.Builder.CreateStructGEP(Tmp, 1));
4119
4120 RegAddr = CGF.Builder.CreateElementBitCast(Tmp, LTy);
4121 }
4122
4123 // AMD64-ABI 3.5.7p5: Step 5. Set:
4124 // l->gp_offset = l->gp_offset + num_gp * 8
4125 // l->fp_offset = l->fp_offset + num_fp * 16.
4126 if (neededInt) {
4127 llvm::Value *Offset = llvm::ConstantInt::get(CGF.Int32Ty, neededInt * 8);
4128 CGF.Builder.CreateStore(CGF.Builder.CreateAdd(gp_offset, Offset),
4129 gp_offset_p);
4130 }
4131 if (neededSSE) {
4132 llvm::Value *Offset = llvm::ConstantInt::get(CGF.Int32Ty, neededSSE * 16);
4133 CGF.Builder.CreateStore(CGF.Builder.CreateAdd(fp_offset, Offset),
4134 fp_offset_p);
4135 }
4136 CGF.EmitBranch(ContBlock);
4137
4138 // Emit code to load the value if it was passed in memory.
4139
4140 CGF.EmitBlock(InMemBlock);
4141 Address MemAddr = EmitX86_64VAArgFromMemory(CGF, VAListAddr, Ty);
4142
4143 // Return the appropriate result.
4144
4145 CGF.EmitBlock(ContBlock);
4146 Address ResAddr = emitMergePHI(CGF, RegAddr, InRegBlock, MemAddr, InMemBlock,
4147 "vaarg.addr");
4148 return ResAddr;
4149 }
4150
EmitMSVAArg(CodeGenFunction & CGF,Address VAListAddr,QualType Ty) const4151 Address X86_64ABIInfo::EmitMSVAArg(CodeGenFunction &CGF, Address VAListAddr,
4152 QualType Ty) const {
4153 return emitVoidPtrVAArg(CGF, VAListAddr, Ty, /*indirect*/ false,
4154 CGF.getContext().getTypeInfoInChars(Ty),
4155 CharUnits::fromQuantity(8),
4156 /*allowHigherAlign*/ false);
4157 }
4158
reclassifyHvaArgForVectorCall(QualType Ty,unsigned & FreeSSERegs,const ABIArgInfo & current) const4159 ABIArgInfo WinX86_64ABIInfo::reclassifyHvaArgForVectorCall(
4160 QualType Ty, unsigned &FreeSSERegs, const ABIArgInfo ¤t) const {
4161 const Type *Base = nullptr;
4162 uint64_t NumElts = 0;
4163
4164 if (!Ty->isBuiltinType() && !Ty->isVectorType() &&
4165 isHomogeneousAggregate(Ty, Base, NumElts) && FreeSSERegs >= NumElts) {
4166 FreeSSERegs -= NumElts;
4167 return getDirectX86Hva();
4168 }
4169 return current;
4170 }
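// Illustrative sketch (hypothetical type, not from this file): given
//   struct HVA2 { __m128 x, y; };
// the first vectorcall pass left it indirect; on this second pass, if at
// least two XMM registers are still free, it is reclassified as a direct
// HVA argument instead.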
4171
4172 ABIArgInfo WinX86_64ABIInfo::classify(QualType Ty, unsigned &FreeSSERegs,
4173 bool IsReturnType, bool IsVectorCall,
4174 bool IsRegCall) const {
4175
4176 if (Ty->isVoidType())
4177 return ABIArgInfo::getIgnore();
4178
4179 if (const EnumType *EnumTy = Ty->getAs<EnumType>())
4180 Ty = EnumTy->getDecl()->getIntegerType();
4181
4182 TypeInfo Info = getContext().getTypeInfo(Ty);
4183 uint64_t Width = Info.Width;
4184 CharUnits Align = getContext().toCharUnitsFromBits(Info.Align);
4185
4186 const RecordType *RT = Ty->getAs<RecordType>();
4187 if (RT) {
4188 if (!IsReturnType) {
4189 if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(RT, getCXXABI()))
4190 return getNaturalAlignIndirect(Ty, RAA == CGCXXABI::RAA_DirectInMemory);
4191 }
4192
4193 if (RT->getDecl()->hasFlexibleArrayMember())
4194 return getNaturalAlignIndirect(Ty, /*ByVal=*/false);
4195
4196 }
4197
4198 const Type *Base = nullptr;
4199 uint64_t NumElts = 0;
4200   // vectorcall adds the concept of a homogeneous vector aggregate, similar to
4201 // other targets.
4202 if ((IsVectorCall || IsRegCall) &&
4203 isHomogeneousAggregate(Ty, Base, NumElts)) {
4204 if (IsRegCall) {
4205 if (FreeSSERegs >= NumElts) {
4206 FreeSSERegs -= NumElts;
4207 if (IsReturnType || Ty->isBuiltinType() || Ty->isVectorType())
4208 return ABIArgInfo::getDirect();
4209 return ABIArgInfo::getExpand();
4210 }
4211 return ABIArgInfo::getIndirect(Align, /*ByVal=*/false);
4212 } else if (IsVectorCall) {
4213 if (FreeSSERegs >= NumElts &&
4214 (IsReturnType || Ty->isBuiltinType() || Ty->isVectorType())) {
4215 FreeSSERegs -= NumElts;
4216 return ABIArgInfo::getDirect();
4217 } else if (IsReturnType) {
4218 return ABIArgInfo::getExpand();
4219 } else if (!Ty->isBuiltinType() && !Ty->isVectorType()) {
4220 // HVAs are delayed and reclassified in the 2nd step.
4221 return ABIArgInfo::getIndirect(Align, /*ByVal=*/false);
4222 }
4223 }
4224 }
4225
4226 if (Ty->isMemberPointerType()) {
4227 // If the member pointer is represented by an LLVM int or ptr, pass it
4228 // directly.
4229 llvm::Type *LLTy = CGT.ConvertType(Ty);
4230 if (LLTy->isPointerTy() || LLTy->isIntegerTy())
4231 return ABIArgInfo::getDirect();
4232 }
4233
4234 if (RT || Ty->isAnyComplexType() || Ty->isMemberPointerType()) {
4235 // MS x64 ABI requirement: "Any argument that doesn't fit in 8 bytes, or is
4236 // not 1, 2, 4, or 8 bytes, must be passed by reference."
4237 if (Width > 64 || !llvm::isPowerOf2_64(Width))
4238 return getNaturalAlignIndirect(Ty, /*ByVal=*/false);
4239
4240 // Otherwise, coerce it to a small integer.
4241 return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(), Width));
4242 }
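// For illustration (hypothetical types): under the rule just applied,
//   struct S8  { int a, b; };     // 64 bits, power of 2 -> direct as i64
//   struct S12 { int a, b, c; };  // 96 bits             -> indirect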
4243
4244 if (const BuiltinType *BT = Ty->getAs<BuiltinType>()) {
4245 switch (BT->getKind()) {
4246 case BuiltinType::Bool:
4247     // The Bool type is always extended by the ABI; other builtin types are
4248     // not extended.
4249 return ABIArgInfo::getExtend(Ty);
4250
4251 case BuiltinType::LongDouble:
4252 // Mingw64 GCC uses the old 80 bit extended precision floating point
4253 // unit. It passes them indirectly through memory.
4254 if (IsMingw64) {
4255 const llvm::fltSemantics *LDF = &getTarget().getLongDoubleFormat();
4256 if (LDF == &llvm::APFloat::x87DoubleExtended())
4257 return ABIArgInfo::getIndirect(Align, /*ByVal=*/false);
4258 }
4259 break;
4260
4261 case BuiltinType::Int128:
4262 case BuiltinType::UInt128:
4263 // If it's a parameter type, the normal ABI rule is that arguments larger
4264 // than 8 bytes are passed indirectly. GCC follows it. We follow it too,
4265 // even though it isn't particularly efficient.
4266 if (!IsReturnType)
4267 return ABIArgInfo::getIndirect(Align, /*ByVal=*/false);
4268
4269 // Mingw64 GCC returns i128 in XMM0. Coerce to v2i64 to handle that.
4270     // Clang matches this behavior for compatibility.
4271 return ABIArgInfo::getDirect(llvm::FixedVectorType::get(
4272 llvm::Type::getInt64Ty(getVMContext()), 2));
4273
4274 default:
4275 break;
4276 }
4277 }
4278
4279 if (Ty->isExtIntType()) {
4280 // MS x64 ABI requirement: "Any argument that doesn't fit in 8 bytes, or is
4281 // not 1, 2, 4, or 8 bytes, must be passed by reference."
4282     // However, a non-power-of-two _ExtInt will be passed as 1, 2, 4, or 8
4283     // bytes anyway as long as it fits in them, so we don't have to check for
4284     // a power of 2.
4285 if (Width <= 64)
4286 return ABIArgInfo::getDirect();
4287 return ABIArgInfo::getIndirect(Align, /*ByVal=*/false);
4288 }
4289
4290 return ABIArgInfo::getDirect();
4291 }
4292
4293 void WinX86_64ABIInfo::computeInfo(CGFunctionInfo &FI) const {
4294 const unsigned CC = FI.getCallingConvention();
4295 bool IsVectorCall = CC == llvm::CallingConv::X86_VectorCall;
4296 bool IsRegCall = CC == llvm::CallingConv::X86_RegCall;
4297
4298 // If __attribute__((sysv_abi)) is in use, use the SysV argument
4299 // classification rules.
4300 if (CC == llvm::CallingConv::X86_64_SysV) {
4301 X86_64ABIInfo SysVABIInfo(CGT, AVXLevel);
4302 SysVABIInfo.computeInfo(FI);
4303 return;
4304 }
4305
4306 unsigned FreeSSERegs = 0;
4307 if (IsVectorCall) {
4308 // We can use up to 4 SSE return registers with vectorcall.
4309 FreeSSERegs = 4;
4310 } else if (IsRegCall) {
4311 // RegCall gives us 16 SSE registers.
4312 FreeSSERegs = 16;
4313 }
4314
4315 if (!getCXXABI().classifyReturnType(FI))
4316 FI.getReturnInfo() = classify(FI.getReturnType(), FreeSSERegs, true,
4317 IsVectorCall, IsRegCall);
4318
4319 if (IsVectorCall) {
4320 // We can use up to 6 SSE register parameters with vectorcall.
4321 FreeSSERegs = 6;
4322 } else if (IsRegCall) {
4323 // RegCall gives us 16 SSE registers, we can reuse the return registers.
4324 FreeSSERegs = 16;
4325 }
4326
4327 unsigned ArgNum = 0;
4328 unsigned ZeroSSERegs = 0;
4329 for (auto &I : FI.arguments()) {
4330     // Vectorcall in x64 only permits the first 6 arguments to be passed in
4331 // XMM/YMM registers. After the sixth argument, pretend no vector
4332 // registers are left.
4333 unsigned *MaybeFreeSSERegs =
4334 (IsVectorCall && ArgNum >= 6) ? &ZeroSSERegs : &FreeSSERegs;
4335 I.info =
4336 classify(I.type, *MaybeFreeSSERegs, false, IsVectorCall, IsRegCall);
4337 ++ArgNum;
4338 }
4339
4340 if (IsVectorCall) {
4341 // For vectorcall, assign aggregate HVAs to any free vector registers in a
4342 // second pass.
4343 for (auto &I : FI.arguments())
4344 I.info = reclassifyHvaArgForVectorCall(I.type, FreeSSERegs, I.info);
4345 }
4346 }
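// Rough sketch of the two-pass vectorcall scheme above, for a hypothetical
// signature (HVA4 being a struct of four floats):
//   void f(double a, HVA4 b, double c);
// The first pass gives 'a' and 'c' XMM registers and leaves 'b' indirect;
// the second pass then assigns 'b' the remaining free XMM registers,
// mirroring MSVC's practice of allocating HVA registers last.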
4347
4348 Address WinX86_64ABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
4349 QualType Ty) const {
4350
4351 bool IsIndirect = false;
4352
4353 // MS x64 ABI requirement: "Any argument that doesn't fit in 8 bytes, or is
4354 // not 1, 2, 4, or 8 bytes, must be passed by reference."
4355 if (isAggregateTypeForABI(Ty) || Ty->isMemberPointerType()) {
4356 uint64_t Width = getContext().getTypeSize(Ty);
4357 IsIndirect = Width > 64 || !llvm::isPowerOf2_64(Width);
4358 }
4359
4360 return emitVoidPtrVAArg(CGF, VAListAddr, Ty, IsIndirect,
4361 CGF.getContext().getTypeInfoInChars(Ty),
4362 CharUnits::fromQuantity(8),
4363 /*allowHigherAlign*/ false);
4364 }
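// Usage sketch (hypothetical types): with the rule above,
//   va_arg(ap, int)       // 4 bytes, power of 2 -> value read from the slot
//   va_arg(ap, Struct24)  // 24 bytes -> the slot holds a pointer, and the
//                         //   value is loaded through it (IsIndirect)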
4365
4366 static bool PPC_initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
4367 llvm::Value *Address, bool Is64Bit,
4368 bool IsAIX) {
4369 // This is calculated from the LLVM and GCC tables and verified
4370 // against gcc output. AFAIK all PPC ABIs use the same encoding.
4371
4372 CodeGen::CGBuilderTy &Builder = CGF.Builder;
4373
4374 llvm::IntegerType *i8 = CGF.Int8Ty;
4375 llvm::Value *Four8 = llvm::ConstantInt::get(i8, 4);
4376 llvm::Value *Eight8 = llvm::ConstantInt::get(i8, 8);
4377 llvm::Value *Sixteen8 = llvm::ConstantInt::get(i8, 16);
4378
4379 // 0-31: r0-31, the 4-byte or 8-byte general-purpose registers
4380 AssignToArrayRange(Builder, Address, Is64Bit ? Eight8 : Four8, 0, 31);
4381
4382 // 32-63: fp0-31, the 8-byte floating-point registers
4383 AssignToArrayRange(Builder, Address, Eight8, 32, 63);
4384
4385 // 64-67 are various 4-byte or 8-byte special-purpose registers:
4386 // 64: mq
4387 // 65: lr
4388 // 66: ctr
4389 // 67: ap
4390 AssignToArrayRange(Builder, Address, Is64Bit ? Eight8 : Four8, 64, 67);
4391
4392 // 68-76 are various 4-byte special-purpose registers:
4393 // 68-75 cr0-7
4394 // 76: xer
4395 AssignToArrayRange(Builder, Address, Four8, 68, 76);
4396
4397 // 77-108: v0-31, the 16-byte vector registers
4398 AssignToArrayRange(Builder, Address, Sixteen8, 77, 108);
4399
4400 // 109: vrsave
4401 // 110: vscr
4402 AssignToArrayRange(Builder, Address, Is64Bit ? Eight8 : Four8, 109, 110);
4403
4404 // AIX does not utilize the rest of the registers.
4405 if (IsAIX)
4406 return false;
4407
4408 // 111: spe_acc
4409 // 112: spefscr
4410 // 113: sfp
4411 AssignToArrayRange(Builder, Address, Is64Bit ? Eight8 : Four8, 111, 113);
4412
4413 if (!Is64Bit)
4414 return false;
4415
4416   // TODO: Need to verify whether these registers are used on 64-bit AIX with
4417   // Power8 or later CPUs.
4418 // 64-bit only registers:
4419 // 114: tfhar
4420 // 115: tfiar
4421 // 116: texasr
4422 AssignToArrayRange(Builder, Address, Eight8, 114, 116);
4423
4424 return false;
4425 }
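// Sketch of the resulting table for 32-bit non-AIX targets (values are the
// per-register byte sizes the unwinder consults):
//   table[0..31]   = 4   // r0-r31
//   table[32..63]  = 8   // f0-f31
//   table[77..108] = 16  // v0-v31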
4426
4427 // AIX
4428 namespace {
4429 /// AIXABIInfo - The AIX XCOFF ABI information.
4430 class AIXABIInfo : public ABIInfo {
4431 const bool Is64Bit;
4432 const unsigned PtrByteSize;
4433 CharUnits getParamTypeAlignment(QualType Ty) const;
4434
4435 public:
4436   AIXABIInfo(CodeGen::CodeGenTypes &CGT, bool Is64Bit)
4437 : ABIInfo(CGT), Is64Bit(Is64Bit), PtrByteSize(Is64Bit ? 8 : 4) {}
4438
4439 bool isPromotableTypeForABI(QualType Ty) const;
4440
4441 ABIArgInfo classifyReturnType(QualType RetTy) const;
4442 ABIArgInfo classifyArgumentType(QualType Ty) const;
4443
4444   void computeInfo(CGFunctionInfo &FI) const override {
4445 if (!getCXXABI().classifyReturnType(FI))
4446 FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
4447
4448 for (auto &I : FI.arguments())
4449 I.info = classifyArgumentType(I.type);
4450 }
4451
4452 Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
4453 QualType Ty) const override;
4454 };
4455
4456 class AIXTargetCodeGenInfo : public TargetCodeGenInfo {
4457 const bool Is64Bit;
4458
4459 public:
4460   AIXTargetCodeGenInfo(CodeGen::CodeGenTypes &CGT, bool Is64Bit)
4461 : TargetCodeGenInfo(std::make_unique<AIXABIInfo>(CGT, Is64Bit)),
4462 Is64Bit(Is64Bit) {}
4463   int getDwarfEHStackPointer(CodeGen::CodeGenModule &M) const override {
4464 return 1; // r1 is the dedicated stack pointer
4465 }
4466
4467 bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
4468 llvm::Value *Address) const override;
4469 };
4470 } // namespace
4471
4472 // Return true if the ABI requires Ty to be passed sign- or zero-
4473 // extended to 32/64 bits.
4474 bool AIXABIInfo::isPromotableTypeForABI(QualType Ty) const {
4475 // Treat an enum type as its underlying type.
4476 if (const EnumType *EnumTy = Ty->getAs<EnumType>())
4477 Ty = EnumTy->getDecl()->getIntegerType();
4478
4479 // Promotable integer types are required to be promoted by the ABI.
4480 if (Ty->isPromotableIntegerType())
4481 return true;
4482
4483 if (!Is64Bit)
4484 return false;
4485
4486   // In 64-bit mode, in addition to the usual promotable integer types, we
4487   // also need to extend all 32-bit types, since the ABI requires promotion
4488   // to 64 bits.
4489 if (const BuiltinType *BT = Ty->getAs<BuiltinType>())
4490 switch (BT->getKind()) {
4491 case BuiltinType::Int:
4492 case BuiltinType::UInt:
4493 return true;
4494 default:
4495 break;
4496 }
4497
4498 return false;
4499 }
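// Consequence sketch (hypothetical declaration): on 64-bit AIX, for
//   void g(int a, unsigned b, long c);
// 'a' is sign-extended and 'b' zero-extended to a full 64-bit GPR, while
// 'c' already fills a register and is passed direct.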
4500
4501 ABIArgInfo AIXABIInfo::classifyReturnType(QualType RetTy) const {
4502 if (RetTy->isAnyComplexType())
4503 return ABIArgInfo::getDirect();
4504
4505 if (RetTy->isVectorType())
4506 return ABIArgInfo::getDirect();
4507
4508 if (RetTy->isVoidType())
4509 return ABIArgInfo::getIgnore();
4510
4511 if (isAggregateTypeForABI(RetTy))
4512 return getNaturalAlignIndirect(RetTy);
4513
4514 return (isPromotableTypeForABI(RetTy) ? ABIArgInfo::getExtend(RetTy)
4515 : ABIArgInfo::getDirect());
4516 }
4517
4518 ABIArgInfo AIXABIInfo::classifyArgumentType(QualType Ty) const {
4519 Ty = useFirstFieldIfTransparentUnion(Ty);
4520
4521 if (Ty->isAnyComplexType())
4522 return ABIArgInfo::getDirect();
4523
4524 if (Ty->isVectorType())
4525 return ABIArgInfo::getDirect();
4526
4527 if (isAggregateTypeForABI(Ty)) {
4528 // Records with non-trivial destructors/copy-constructors should not be
4529 // passed by value.
4530 if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, getCXXABI()))
4531 return getNaturalAlignIndirect(Ty, RAA == CGCXXABI::RAA_DirectInMemory);
4532
4533 CharUnits CCAlign = getParamTypeAlignment(Ty);
4534 CharUnits TyAlign = getContext().getTypeAlignInChars(Ty);
4535
4536 return ABIArgInfo::getIndirect(CCAlign, /*ByVal*/ true,
4537 /*Realign*/ TyAlign > CCAlign);
4538 }
4539
4540 return (isPromotableTypeForABI(Ty) ? ABIArgInfo::getExtend(Ty)
4541 : ABIArgInfo::getDirect());
4542 }
4543
4544 CharUnits AIXABIInfo::getParamTypeAlignment(QualType Ty) const {
4545 // Complex types are passed just like their elements.
4546 if (const ComplexType *CTy = Ty->getAs<ComplexType>())
4547 Ty = CTy->getElementType();
4548
4549 if (Ty->isVectorType())
4550 return CharUnits::fromQuantity(16);
4551
4552 // If the structure contains a vector type, the alignment is 16.
4553 if (isRecordWithSIMDVectorType(getContext(), Ty))
4554 return CharUnits::fromQuantity(16);
4555
4556 return CharUnits::fromQuantity(PtrByteSize);
4557 }
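// For example (hypothetical types): a 16-byte vector parameter, or a struct
// containing one, gets 16-byte alignment in the parameter area, while a
// struct of plain scalars falls back to the 4- or 8-byte pointer size.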
4558
4559 Address AIXABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
4560 QualType Ty) const {
4561 if (Ty->isAnyComplexType())
4562 llvm::report_fatal_error("complex type is not supported on AIX yet");
4563
4564 if (Ty->isVectorType())
4565 llvm::report_fatal_error(
4566 "vector types are not yet supported for variadic functions on AIX");
4567
4568 auto TypeInfo = getContext().getTypeInfoInChars(Ty);
4569 TypeInfo.Align = getParamTypeAlignment(Ty);
4570
4571 CharUnits SlotSize = CharUnits::fromQuantity(PtrByteSize);
4572
4573 return emitVoidPtrVAArg(CGF, VAListAddr, Ty, /*Indirect*/ false, TypeInfo,
4574 SlotSize, /*AllowHigher*/ true);
4575 }
4576
4577 bool AIXTargetCodeGenInfo::initDwarfEHRegSizeTable(
4578 CodeGen::CodeGenFunction &CGF, llvm::Value *Address) const {
4579 return PPC_initDwarfEHRegSizeTable(CGF, Address, Is64Bit, /*IsAIX*/ true);
4580 }
4581
4582 // PowerPC-32
4583 namespace {
4584 /// PPC32_SVR4_ABIInfo - The 32-bit PowerPC ELF (SVR4) ABI information.
4585 class PPC32_SVR4_ABIInfo : public DefaultABIInfo {
4586 bool IsSoftFloatABI;
4587 bool IsRetSmallStructInRegABI;
4588
4589 CharUnits getParamTypeAlignment(QualType Ty) const;
4590
4591 public:
4592   PPC32_SVR4_ABIInfo(CodeGen::CodeGenTypes &CGT, bool SoftFloatABI,
4593 bool RetSmallStructInRegABI)
4594 : DefaultABIInfo(CGT), IsSoftFloatABI(SoftFloatABI),
4595 IsRetSmallStructInRegABI(RetSmallStructInRegABI) {}
4596
4597 ABIArgInfo classifyReturnType(QualType RetTy) const;
4598
4599   void computeInfo(CGFunctionInfo &FI) const override {
4600 if (!getCXXABI().classifyReturnType(FI))
4601 FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
4602 for (auto &I : FI.arguments())
4603 I.info = classifyArgumentType(I.type);
4604 }
4605
4606 Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
4607 QualType Ty) const override;
4608 };
4609
4610 class PPC32TargetCodeGenInfo : public TargetCodeGenInfo {
4611 public:
4612   PPC32TargetCodeGenInfo(CodeGenTypes &CGT, bool SoftFloatABI,
4613 bool RetSmallStructInRegABI)
4614 : TargetCodeGenInfo(std::make_unique<PPC32_SVR4_ABIInfo>(
4615 CGT, SoftFloatABI, RetSmallStructInRegABI)) {}
4616
4617 static bool isStructReturnInRegABI(const llvm::Triple &Triple,
4618 const CodeGenOptions &Opts);
4619
4620   int getDwarfEHStackPointer(CodeGen::CodeGenModule &M) const override {
4621 // This is recovered from gcc output.
4622 return 1; // r1 is the dedicated stack pointer
4623 }
4624
4625 bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
4626 llvm::Value *Address) const override;
4627 };
4628 } // namespace
4629
4630 CharUnits PPC32_SVR4_ABIInfo::getParamTypeAlignment(QualType Ty) const {
4631 // Complex types are passed just like their elements.
4632 if (const ComplexType *CTy = Ty->getAs<ComplexType>())
4633 Ty = CTy->getElementType();
4634
4635 if (Ty->isVectorType())
4636 return CharUnits::fromQuantity(getContext().getTypeSize(Ty) == 128 ? 16
4637 : 4);
4638
4639 // For single-element float/vector structs, we consider the whole type
4640 // to have the same alignment requirements as its single element.
4641 const Type *AlignTy = nullptr;
4642 if (const Type *EltType = isSingleElementStruct(Ty, getContext())) {
4643 const BuiltinType *BT = EltType->getAs<BuiltinType>();
4644 if ((EltType->isVectorType() && getContext().getTypeSize(EltType) == 128) ||
4645 (BT && BT->isFloatingPoint()))
4646 AlignTy = EltType;
4647 }
4648
4649 if (AlignTy)
4650 return CharUnits::fromQuantity(AlignTy->isVectorType() ? 16 : 4);
4651 return CharUnits::fromQuantity(4);
4652 }
4653
4654 ABIArgInfo PPC32_SVR4_ABIInfo::classifyReturnType(QualType RetTy) const {
4655 uint64_t Size;
4656
4657 // -msvr4-struct-return puts small aggregates in GPR3 and GPR4.
4658 if (isAggregateTypeForABI(RetTy) && IsRetSmallStructInRegABI &&
4659 (Size = getContext().getTypeSize(RetTy)) <= 64) {
4660 // System V ABI (1995), page 3-22, specified:
4661 // > A structure or union whose size is less than or equal to 8 bytes
4662 // > shall be returned in r3 and r4, as if it were first stored in the
4663 // > 8-byte aligned memory area and then the low addressed word were
4664 // > loaded into r3 and the high-addressed word into r4. Bits beyond
4665 // > the last member of the structure or union are not defined.
4666 //
4667 // GCC for big-endian PPC32 inserts the pad before the first member,
4668 // not "beyond the last member" of the struct. To stay compatible
4669 // with GCC, we coerce the struct to an integer of the same size.
4670 // LLVM will extend it and return i32 in r3, or i64 in r3:r4.
4671 if (Size == 0)
4672 return ABIArgInfo::getIgnore();
4673 else {
4674 llvm::Type *CoerceTy = llvm::Type::getIntNTy(getVMContext(), Size);
4675 return ABIArgInfo::getDirect(CoerceTy);
4676 }
4677 }
4678
4679 return DefaultABIInfo::classifyReturnType(RetTy);
4680 }
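// Worked example (hypothetical structs, -msvr4-struct-return in effect):
//   struct P { short a, b; };  // 32 bits -> coerced to i32, returned in r3
//   struct Q { int a, b; };    // 64 bits -> coerced to i64, in r3:r4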
4681
4682 // TODO: this implementation is now likely redundant with
4683 // DefaultABIInfo::EmitVAArg.
4684 Address PPC32_SVR4_ABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAList,
4685 QualType Ty) const {
4686 if (getTarget().getTriple().isOSDarwin()) {
4687 auto TI = getContext().getTypeInfoInChars(Ty);
4688 TI.Align = getParamTypeAlignment(Ty);
4689
4690 CharUnits SlotSize = CharUnits::fromQuantity(4);
4691 return emitVoidPtrVAArg(CGF, VAList, Ty,
4692 classifyArgumentType(Ty).isIndirect(), TI, SlotSize,
4693 /*AllowHigherAlign=*/true);
4694 }
4695
4696 const unsigned OverflowLimit = 8;
4697 if (const ComplexType *CTy = Ty->getAs<ComplexType>()) {
4698 // TODO: Implement this. For now ignore.
4699 (void)CTy;
4700 return Address::invalid(); // FIXME?
4701 }
4702
4703 // struct __va_list_tag {
4704 // unsigned char gpr;
4705 // unsigned char fpr;
4706 // unsigned short reserved;
4707 // void *overflow_arg_area;
4708 // void *reg_save_area;
4709 // };
4710
4711 bool isI64 = Ty->isIntegerType() && getContext().getTypeSize(Ty) == 64;
4712 bool isInt = !Ty->isFloatingType();
4713 bool isF64 = Ty->isFloatingType() && getContext().getTypeSize(Ty) == 64;
4714
4715 // All aggregates are passed indirectly? That doesn't seem consistent
4716 // with the argument-lowering code.
4717 bool isIndirect = isAggregateTypeForABI(Ty);
4718
4719 CGBuilderTy &Builder = CGF.Builder;
4720
4721 // The calling convention either uses 1-2 GPRs or 1 FPR.
4722 Address NumRegsAddr = Address::invalid();
4723 if (isInt || IsSoftFloatABI) {
4724 NumRegsAddr = Builder.CreateStructGEP(VAList, 0, "gpr");
4725 } else {
4726 NumRegsAddr = Builder.CreateStructGEP(VAList, 1, "fpr");
4727 }
4728
4729 llvm::Value *NumRegs = Builder.CreateLoad(NumRegsAddr, "numUsedRegs");
4730
4731 // "Align" the register count when TY is i64.
4732 if (isI64 || (isF64 && IsSoftFloatABI)) {
4733 NumRegs = Builder.CreateAdd(NumRegs, Builder.getInt8(1));
4734 NumRegs = Builder.CreateAnd(NumRegs, Builder.getInt8((uint8_t) ~1U));
4735 }
4736
4737 llvm::Value *CC =
4738 Builder.CreateICmpULT(NumRegs, Builder.getInt8(OverflowLimit), "cond");
4739
4740 llvm::BasicBlock *UsingRegs = CGF.createBasicBlock("using_regs");
4741 llvm::BasicBlock *UsingOverflow = CGF.createBasicBlock("using_overflow");
4742 llvm::BasicBlock *Cont = CGF.createBasicBlock("cont");
4743
4744 Builder.CreateCondBr(CC, UsingRegs, UsingOverflow);
4745
4746 llvm::Type *DirectTy = CGF.ConvertType(Ty);
4747 if (isIndirect) DirectTy = DirectTy->getPointerTo(0);
4748
4749 // Case 1: consume registers.
4750 Address RegAddr = Address::invalid();
4751 {
4752 CGF.EmitBlock(UsingRegs);
4753
4754 Address RegSaveAreaPtr = Builder.CreateStructGEP(VAList, 4);
4755 RegAddr = Address(Builder.CreateLoad(RegSaveAreaPtr),
4756 CharUnits::fromQuantity(8));
4757 assert(RegAddr.getElementType() == CGF.Int8Ty);
4758
4759 // Floating-point registers start after the general-purpose registers.
4760 if (!(isInt || IsSoftFloatABI)) {
4761 RegAddr = Builder.CreateConstInBoundsByteGEP(RegAddr,
4762 CharUnits::fromQuantity(32));
4763 }
4764
4765     // Get the address of the saved value by scaling the number of
4766     // registers we've used by the size of each register.
4767 CharUnits RegSize = CharUnits::fromQuantity((isInt || IsSoftFloatABI) ? 4 : 8);
4768 llvm::Value *RegOffset =
4769 Builder.CreateMul(NumRegs, Builder.getInt8(RegSize.getQuantity()));
4770 RegAddr = Address(Builder.CreateInBoundsGEP(CGF.Int8Ty,
4771 RegAddr.getPointer(), RegOffset),
4772 RegAddr.getAlignment().alignmentOfArrayElement(RegSize));
4773 RegAddr = Builder.CreateElementBitCast(RegAddr, DirectTy);
4774
4775 // Increase the used-register count.
4776 NumRegs =
4777 Builder.CreateAdd(NumRegs,
4778 Builder.getInt8((isI64 || (isF64 && IsSoftFloatABI)) ? 2 : 1));
4779 Builder.CreateStore(NumRegs, NumRegsAddr);
4780
4781 CGF.EmitBranch(Cont);
4782 }
4783
4784 // Case 2: consume space in the overflow area.
4785 Address MemAddr = Address::invalid();
4786 {
4787 CGF.EmitBlock(UsingOverflow);
4788
4789 Builder.CreateStore(Builder.getInt8(OverflowLimit), NumRegsAddr);
4790
4791 // Everything in the overflow area is rounded up to a size of at least 4.
4792 CharUnits OverflowAreaAlign = CharUnits::fromQuantity(4);
4793
4794 CharUnits Size;
4795 if (!isIndirect) {
4796 auto TypeInfo = CGF.getContext().getTypeInfoInChars(Ty);
4797 Size = TypeInfo.Width.alignTo(OverflowAreaAlign);
4798 } else {
4799 Size = CGF.getPointerSize();
4800 }
4801
4802 Address OverflowAreaAddr = Builder.CreateStructGEP(VAList, 3);
4803 Address OverflowArea(Builder.CreateLoad(OverflowAreaAddr, "argp.cur"),
4804 OverflowAreaAlign);
4805     // Round up the address of the argument to its alignment.
4806 CharUnits Align = CGF.getContext().getTypeAlignInChars(Ty);
4807 if (Align > OverflowAreaAlign) {
4808 llvm::Value *Ptr = OverflowArea.getPointer();
4809 OverflowArea = Address(emitRoundPointerUpToAlignment(CGF, Ptr, Align),
4810 Align);
4811 }
4812
4813 MemAddr = Builder.CreateElementBitCast(OverflowArea, DirectTy);
4814
4815 // Increase the overflow area.
4816 OverflowArea = Builder.CreateConstInBoundsByteGEP(OverflowArea, Size);
4817 Builder.CreateStore(OverflowArea.getPointer(), OverflowAreaAddr);
4818 CGF.EmitBranch(Cont);
4819 }
4820
4821 CGF.EmitBlock(Cont);
4822
4823 // Merge the cases with a phi.
4824 Address Result = emitMergePHI(CGF, RegAddr, UsingRegs, MemAddr, UsingOverflow,
4825 "vaarg.addr");
4826
4827 // Load the pointer if the argument was passed indirectly.
4828 if (isIndirect) {
4829 Result = Address(Builder.CreateLoad(Result, "aggr"),
4830 getContext().getTypeAlignInChars(Ty));
4831 }
4832
4833 return Result;
4834 }
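// Register-counting sketch for the path above (hypothetical soft-float
// call): reading an i64 when gpr == 3 first rounds the count up to 4 so the
// value starts on an even GPR pair (r7:r8), then bumps gpr to 6; once gpr
// reaches OverflowLimit (8), arguments come from the overflow area instead.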
4835
4836 bool PPC32TargetCodeGenInfo::isStructReturnInRegABI(
4837 const llvm::Triple &Triple, const CodeGenOptions &Opts) {
4838 assert(Triple.isPPC32());
4839
4840 switch (Opts.getStructReturnConvention()) {
4841 case CodeGenOptions::SRCK_Default:
4842 break;
4843 case CodeGenOptions::SRCK_OnStack: // -maix-struct-return
4844 return false;
4845 case CodeGenOptions::SRCK_InRegs: // -msvr4-struct-return
4846 return true;
4847 }
4848
4849 if (Triple.isOSBinFormatELF() && !Triple.isOSLinux())
4850 return true;
4851
4852 return false;
4853 }
4854
4855 bool
4856 PPC32TargetCodeGenInfo::initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
4857 llvm::Value *Address) const {
4858 return PPC_initDwarfEHRegSizeTable(CGF, Address, /*Is64Bit*/ false,
4859 /*IsAIX*/ false);
4860 }
4861
4862 // PowerPC-64
4863
4864 namespace {
4865 /// PPC64_SVR4_ABIInfo - The 64-bit PowerPC ELF (SVR4) ABI information.
4866 class PPC64_SVR4_ABIInfo : public SwiftABIInfo {
4867 public:
4868 enum ABIKind {
4869 ELFv1 = 0,
4870 ELFv2
4871 };
4872
4873 private:
4874 static const unsigned GPRBits = 64;
4875 ABIKind Kind;
4876 bool IsSoftFloatABI;
4877
4878 public:
4879   PPC64_SVR4_ABIInfo(CodeGen::CodeGenTypes &CGT, ABIKind Kind,
4880 bool SoftFloatABI)
4881 : SwiftABIInfo(CGT), Kind(Kind), IsSoftFloatABI(SoftFloatABI) {}
4882
4883 bool isPromotableTypeForABI(QualType Ty) const;
4884 CharUnits getParamTypeAlignment(QualType Ty) const;
4885
4886 ABIArgInfo classifyReturnType(QualType RetTy) const;
4887 ABIArgInfo classifyArgumentType(QualType Ty) const;
4888
4889 bool isHomogeneousAggregateBaseType(QualType Ty) const override;
4890 bool isHomogeneousAggregateSmallEnough(const Type *Ty,
4891 uint64_t Members) const override;
4892
4893 // TODO: We can add more logic to computeInfo to improve performance.
4894 // Example: For aggregate arguments that fit in a register, we could
4895 // use getDirectInReg (as is done below for structs containing a single
4896 // floating-point value) to avoid pushing them to memory on function
4897 // entry. This would require changing the logic in PPCISelLowering
4898 // when lowering the parameters in the caller and args in the callee.
4899   void computeInfo(CGFunctionInfo &FI) const override {
4900 if (!getCXXABI().classifyReturnType(FI))
4901 FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
4902 for (auto &I : FI.arguments()) {
4903 // We rely on the default argument classification for the most part.
4904 // One exception: An aggregate containing a single floating-point
4905 // or vector item must be passed in a register if one is available.
4906 const Type *T = isSingleElementStruct(I.type, getContext());
4907 if (T) {
4908 const BuiltinType *BT = T->getAs<BuiltinType>();
4909 if ((T->isVectorType() && getContext().getTypeSize(T) == 128) ||
4910 (BT && BT->isFloatingPoint())) {
4911 QualType QT(T, 0);
4912 I.info = ABIArgInfo::getDirectInReg(CGT.ConvertType(QT));
4913 continue;
4914 }
4915 }
4916 I.info = classifyArgumentType(I.type);
4917 }
4918 }
4919
4920 Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
4921 QualType Ty) const override;
4922
4923   bool shouldPassIndirectlyForSwift(ArrayRef<llvm::Type*> scalars,
4924 bool asReturnValue) const override {
4925 return occupiesMoreThan(CGT, scalars, /*total*/ 4);
4926 }
4927
4928   bool isSwiftErrorInRegister() const override {
4929 return false;
4930 }
4931 };
4932
4933 class PPC64_SVR4_TargetCodeGenInfo : public TargetCodeGenInfo {
4934
4935 public:
4936   PPC64_SVR4_TargetCodeGenInfo(CodeGenTypes &CGT,
4937 PPC64_SVR4_ABIInfo::ABIKind Kind,
4938 bool SoftFloatABI)
4939 : TargetCodeGenInfo(
4940 std::make_unique<PPC64_SVR4_ABIInfo>(CGT, Kind, SoftFloatABI)) {}
4941
4942   int getDwarfEHStackPointer(CodeGen::CodeGenModule &M) const override {
4943 // This is recovered from gcc output.
4944 return 1; // r1 is the dedicated stack pointer
4945 }
4946
4947 bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
4948 llvm::Value *Address) const override;
4949 };
4950
4951 class PPC64TargetCodeGenInfo : public DefaultTargetCodeGenInfo {
4952 public:
4953   PPC64TargetCodeGenInfo(CodeGenTypes &CGT) : DefaultTargetCodeGenInfo(CGT) {}
4954
4955   int getDwarfEHStackPointer(CodeGen::CodeGenModule &M) const override {
4956 // This is recovered from gcc output.
4957 return 1; // r1 is the dedicated stack pointer
4958 }
4959
4960 bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
4961 llvm::Value *Address) const override;
4962 };
4963
4964 } // namespace
4965
4966 // Return true if the ABI requires Ty to be passed sign- or zero-
4967 // extended to 64 bits.
4968 bool
4969 PPC64_SVR4_ABIInfo::isPromotableTypeForABI(QualType Ty) const {
4970 // Treat an enum type as its underlying type.
4971 if (const EnumType *EnumTy = Ty->getAs<EnumType>())
4972 Ty = EnumTy->getDecl()->getIntegerType();
4973
4974 // Promotable integer types are required to be promoted by the ABI.
4975 if (isPromotableIntegerTypeForABI(Ty))
4976 return true;
4977
4978 // In addition to the usual promotable integer types, we also need to
4979 // extend all 32-bit types, since the ABI requires promotion to 64 bits.
4980 if (const BuiltinType *BT = Ty->getAs<BuiltinType>())
4981 switch (BT->getKind()) {
4982 case BuiltinType::Int:
4983 case BuiltinType::UInt:
4984 return true;
4985 default:
4986 break;
4987 }
4988
4989 if (const auto *EIT = Ty->getAs<ExtIntType>())
4990 if (EIT->getNumBits() < 64)
4991 return true;
4992
4993 return false;
4994 }
4995
4996 /// getParamTypeAlignment - Determine whether a type requires 16-byte or
4997 /// higher alignment in the parameter area. Always returns at least 8.
4998 CharUnits PPC64_SVR4_ABIInfo::getParamTypeAlignment(QualType Ty) const {
4999 // Complex types are passed just like their elements.
5000 if (const ComplexType *CTy = Ty->getAs<ComplexType>())
5001 Ty = CTy->getElementType();
5002
5003 // Only vector types of size 16 bytes need alignment (larger types are
5004 // passed via reference, smaller types are not aligned).
5005 if (Ty->isVectorType()) {
5006 return CharUnits::fromQuantity(getContext().getTypeSize(Ty) == 128 ? 16 : 8);
5007 } else if (Ty->isRealFloatingType() &&
5008 &getContext().getFloatTypeSemantics(Ty) ==
5009 &llvm::APFloat::IEEEquad()) {
5010 // According to ABI document section 'Optional Save Areas': If extended
5011 // precision floating-point values in IEEE BINARY 128 QUADRUPLE PRECISION
5012 // format are supported, map them to a single quadword, quadword aligned.
5013 return CharUnits::fromQuantity(16);
5014 }
5015
5016 // For single-element float/vector structs, we consider the whole type
5017 // to have the same alignment requirements as its single element.
5018 const Type *AlignAsType = nullptr;
5019 const Type *EltType = isSingleElementStruct(Ty, getContext());
5020 if (EltType) {
5021 const BuiltinType *BT = EltType->getAs<BuiltinType>();
5022 if ((EltType->isVectorType() && getContext().getTypeSize(EltType) == 128) ||
5023 (BT && BT->isFloatingPoint()))
5024 AlignAsType = EltType;
5025 }
5026
5027 // Likewise for ELFv2 homogeneous aggregates.
5028 const Type *Base = nullptr;
5029 uint64_t Members = 0;
5030 if (!AlignAsType && Kind == ELFv2 &&
5031 isAggregateTypeForABI(Ty) && isHomogeneousAggregate(Ty, Base, Members))
5032 AlignAsType = Base;
5033
5034   // With special-case aggregates, only vector base types need alignment.
5035 if (AlignAsType) {
5036 return CharUnits::fromQuantity(AlignAsType->isVectorType() ? 16 : 8);
5037 }
5038
5039 // Otherwise, we only need alignment for any aggregate type that
5040 // has an alignment requirement of >= 16 bytes.
5041 if (isAggregateTypeForABI(Ty) && getContext().getTypeAlign(Ty) >= 128) {
5042 return CharUnits::fromQuantity(16);
5043 }
5044
5045 return CharUnits::fromQuantity(8);
5046 }
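// For instance (assuming a target with IEEE-quad long double support): a
// __float128 argument or a 16-byte Altivec vector lands on a 16-byte
// boundary in the parameter save area, while ordinary scalars and small
// aggregates use the default 8-byte doubleword.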
5047
5048 /// isHomogeneousAggregate - Return true if a type is a homogeneous
5049 /// aggregate (as used, e.g., by the ELFv2 and AAPCS ABIs). Base is set to
5050 /// the base element type, and Members is set to the number of base elements.
5051 bool ABIInfo::isHomogeneousAggregate(QualType Ty, const Type *&Base,
5052 uint64_t &Members) const {
5053 if (const ConstantArrayType *AT = getContext().getAsConstantArrayType(Ty)) {
5054 uint64_t NElements = AT->getSize().getZExtValue();
5055 if (NElements == 0)
5056 return false;
5057 if (!isHomogeneousAggregate(AT->getElementType(), Base, Members))
5058 return false;
5059 Members *= NElements;
5060 } else if (const RecordType *RT = Ty->getAs<RecordType>()) {
5061 const RecordDecl *RD = RT->getDecl();
5062 if (RD->hasFlexibleArrayMember())
5063 return false;
5064
5065 Members = 0;
5066
5067 // If this is a C++ record, check the properties of the record such as
5068 // bases and ABI specific restrictions
5069 if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD)) {
5070 if (!getCXXABI().isPermittedToBeHomogeneousAggregate(CXXRD))
5071 return false;
5072
5073 for (const auto &I : CXXRD->bases()) {
5074 // Ignore empty records.
5075 if (isEmptyRecord(getContext(), I.getType(), true))
5076 continue;
5077
5078 uint64_t FldMembers;
5079 if (!isHomogeneousAggregate(I.getType(), Base, FldMembers))
5080 return false;
5081
5082 Members += FldMembers;
5083 }
5084 }
5085
5086 for (const auto *FD : RD->fields()) {
5087 // Ignore (non-zero arrays of) empty records.
5088 QualType FT = FD->getType();
5089 while (const ConstantArrayType *AT =
5090 getContext().getAsConstantArrayType(FT)) {
5091 if (AT->getSize().getZExtValue() == 0)
5092 return false;
5093 FT = AT->getElementType();
5094 }
5095 if (isEmptyRecord(getContext(), FT, true))
5096 continue;
5097
5098 // For compatibility with GCC, ignore empty bitfields in C++ mode.
5099 if (getContext().getLangOpts().CPlusPlus &&
5100 FD->isZeroLengthBitField(getContext()))
5101 continue;
5102
5103 uint64_t FldMembers;
5104 if (!isHomogeneousAggregate(FD->getType(), Base, FldMembers))
5105 return false;
5106
5107 Members = (RD->isUnion() ?
5108 std::max(Members, FldMembers) : Members + FldMembers);
5109 }
5110
5111 if (!Base)
5112 return false;
5113
5114 // Ensure there is no padding.
5115 if (getContext().getTypeSize(Base) * Members !=
5116 getContext().getTypeSize(Ty))
5117 return false;
5118 } else {
5119 Members = 1;
5120 if (const ComplexType *CT = Ty->getAs<ComplexType>()) {
5121 Members = 2;
5122 Ty = CT->getElementType();
5123 }
5124
5125 // Most ABIs only support float, double, and some vector type widths.
5126 if (!isHomogeneousAggregateBaseType(Ty))
5127 return false;
5128
5129 // The base type must be the same for all members. Types that
5130 // agree in both total size and mode (float vs. vector) are
5131 // treated as being equivalent here.
5132 const Type *TyPtr = Ty.getTypePtr();
5133 if (!Base) {
5134 Base = TyPtr;
5135       // A vector's size is reported already rounded up to a power of 2, so
5136       // for a non-power-of-2 vector, explicitly widen the base type to match.
5137 if (const VectorType *VT = Base->getAs<VectorType>()) {
5138 QualType EltTy = VT->getElementType();
5139 unsigned NumElements =
5140 getContext().getTypeSize(VT) / getContext().getTypeSize(EltTy);
5141 Base = getContext()
5142 .getVectorType(EltTy, NumElements, VT->getVectorKind())
5143 .getTypePtr();
5144 }
5145 }
5146
5147 if (Base->isVectorType() != TyPtr->isVectorType() ||
5148 getContext().getTypeSize(Base) != getContext().getTypeSize(TyPtr))
5149 return false;
5150 }
5151 return Members > 0 && isHomogeneousAggregateSmallEnough(Base, Members);
5152 }
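// Classification sketch (hypothetical types):
//   struct A { double x, y, z; };        // Base = double, Members = 3 -> HA
//   struct B { double d[2]; double e; }; // arrays flatten: Members = 3 -> HA
//   struct C { double d; float f; };     // mixed base sizes -> not an HA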
5153
5154 bool PPC64_SVR4_ABIInfo::isHomogeneousAggregateBaseType(QualType Ty) const {
5155 // Homogeneous aggregates for ELFv2 must have base types of float,
5156 // double, long double, or 128-bit vectors.
5157 if (const BuiltinType *BT = Ty->getAs<BuiltinType>()) {
5158 if (BT->getKind() == BuiltinType::Float ||
5159 BT->getKind() == BuiltinType::Double ||
5160 BT->getKind() == BuiltinType::LongDouble ||
5161 (getContext().getTargetInfo().hasFloat128Type() &&
5162 (BT->getKind() == BuiltinType::Float128))) {
5163 if (IsSoftFloatABI)
5164 return false;
5165 return true;
5166 }
5167 }
5168 if (const VectorType *VT = Ty->getAs<VectorType>()) {
5169 if (getContext().getTypeSize(VT) == 128)
5170 return true;
5171 }
5172 return false;
5173 }
5174
5175 bool PPC64_SVR4_ABIInfo::isHomogeneousAggregateSmallEnough(
5176 const Type *Base, uint64_t Members) const {
5177   // Vector and fp128 types require one register; other floating-point
5178   // types require one or two registers depending on their size.
5179 uint32_t NumRegs =
5180 ((getContext().getTargetInfo().hasFloat128Type() &&
5181 Base->isFloat128Type()) ||
5182 Base->isVectorType()) ? 1
5183 : (getContext().getTypeSize(Base) + 63) / 64;
5184
5185 // Homogeneous Aggregates may occupy at most 8 registers.
5186 return Members * NumRegs <= 8;
5187 }
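// E.g. (hypothetical aggregates): eight doubles still qualify (8 members x
// 1 register each), but five 128-bit IBM long doubles do not, since each
// member occupies two GPR-sized registers.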
5188
5189 ABIArgInfo
5190 PPC64_SVR4_ABIInfo::classifyArgumentType(QualType Ty) const {
5191 Ty = useFirstFieldIfTransparentUnion(Ty);
5192
5193 if (Ty->isAnyComplexType())
5194 return ABIArgInfo::getDirect();
5195
5196 // Non-Altivec vector types are passed in GPRs (smaller than 16 bytes)
5197 // or via reference (larger than 16 bytes).
5198 if (Ty->isVectorType()) {
5199 uint64_t Size = getContext().getTypeSize(Ty);
5200 if (Size > 128)
5201 return getNaturalAlignIndirect(Ty, /*ByVal=*/false);
5202 else if (Size < 128) {
5203 llvm::Type *CoerceTy = llvm::IntegerType::get(getVMContext(), Size);
5204 return ABIArgInfo::getDirect(CoerceTy);
5205 }
5206 }
5207
5208 if (const auto *EIT = Ty->getAs<ExtIntType>())
5209 if (EIT->getNumBits() > 128)
5210 return getNaturalAlignIndirect(Ty, /*ByVal=*/true);
5211
5212 if (isAggregateTypeForABI(Ty)) {
5213 if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, getCXXABI()))
5214 return getNaturalAlignIndirect(Ty, RAA == CGCXXABI::RAA_DirectInMemory);
5215
5216 uint64_t ABIAlign = getParamTypeAlignment(Ty).getQuantity();
5217 uint64_t TyAlign = getContext().getTypeAlignInChars(Ty).getQuantity();
5218
5219 // ELFv2 homogeneous aggregates are passed as array types.
5220 const Type *Base = nullptr;
5221 uint64_t Members = 0;
5222 if (Kind == ELFv2 &&
5223 isHomogeneousAggregate(Ty, Base, Members)) {
5224 llvm::Type *BaseTy = CGT.ConvertType(QualType(Base, 0));
5225 llvm::Type *CoerceTy = llvm::ArrayType::get(BaseTy, Members);
5226 return ABIArgInfo::getDirect(CoerceTy);
5227 }
5228
5229 // If an aggregate may end up fully in registers, we do not
5230     // use the ByVal method, but instead pass the aggregate as an array.
5231 // This is usually beneficial since we avoid forcing the
5232 // back-end to store the argument to memory.
5233 uint64_t Bits = getContext().getTypeSize(Ty);
5234 if (Bits > 0 && Bits <= 8 * GPRBits) {
5235 llvm::Type *CoerceTy;
5236
5237 // Types up to 8 bytes are passed as integer type (which will be
5238 // properly aligned in the argument save area doubleword).
5239 if (Bits <= GPRBits)
5240 CoerceTy =
5241 llvm::IntegerType::get(getVMContext(), llvm::alignTo(Bits, 8));
5242 // Larger types are passed as arrays, with the base type selected
5243 // according to the required alignment in the save area.
5244 else {
5245 uint64_t RegBits = ABIAlign * 8;
5246 uint64_t NumRegs = llvm::alignTo(Bits, RegBits) / RegBits;
5247 llvm::Type *RegTy = llvm::IntegerType::get(getVMContext(), RegBits);
5248 CoerceTy = llvm::ArrayType::get(RegTy, NumRegs);
5249 }
5250
5251 return ABIArgInfo::getDirect(CoerceTy);
5252 }
5253
5254 // All other aggregates are passed ByVal.
5255 return ABIArgInfo::getIndirect(CharUnits::fromQuantity(ABIAlign),
5256 /*ByVal=*/true,
5257 /*Realign=*/TyAlign > ABIAlign);
5258 }
5259
5260 return (isPromotableTypeForABI(Ty) ? ABIArgInfo::getExtend(Ty)
5261 : ABIArgInfo::getDirect());
5262 }
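// Coercion sketch (ELFv2, hypothetical structs):
//   struct F4  { float a, b, c, d; }; // homogeneous -> [4 x float]
//   struct B24 { long x, y, z; };     // 24 bytes    -> [3 x i64]
//   struct Big { char buf[80]; };     // > 64 bytes  -> indirect, ByVal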
5263
5264 ABIArgInfo
5265 PPC64_SVR4_ABIInfo::classifyReturnType(QualType RetTy) const {
5266 if (RetTy->isVoidType())
5267 return ABIArgInfo::getIgnore();
5268
5269 if (RetTy->isAnyComplexType())
5270 return ABIArgInfo::getDirect();
5271
5272 // Non-Altivec vector types are returned in GPRs (smaller than 16 bytes)
5273 // or via reference (larger than 16 bytes).
5274 if (RetTy->isVectorType()) {
5275 uint64_t Size = getContext().getTypeSize(RetTy);
5276 if (Size > 128)
5277 return getNaturalAlignIndirect(RetTy);
5278 else if (Size < 128) {
5279 llvm::Type *CoerceTy = llvm::IntegerType::get(getVMContext(), Size);
5280 return ABIArgInfo::getDirect(CoerceTy);
5281 }
5282 }
5283
5284 if (const auto *EIT = RetTy->getAs<ExtIntType>())
5285 if (EIT->getNumBits() > 128)
5286 return getNaturalAlignIndirect(RetTy, /*ByVal=*/false);
5287
5288 if (isAggregateTypeForABI(RetTy)) {
5289 // ELFv2 homogeneous aggregates are returned as array types.
5290 const Type *Base = nullptr;
5291 uint64_t Members = 0;
5292 if (Kind == ELFv2 &&
5293 isHomogeneousAggregate(RetTy, Base, Members)) {
5294 llvm::Type *BaseTy = CGT.ConvertType(QualType(Base, 0));
5295 llvm::Type *CoerceTy = llvm::ArrayType::get(BaseTy, Members);
5296 return ABIArgInfo::getDirect(CoerceTy);
5297 }
5298
5299 // ELFv2 small aggregates are returned in up to two registers.
5300 uint64_t Bits = getContext().getTypeSize(RetTy);
5301 if (Kind == ELFv2 && Bits <= 2 * GPRBits) {
5302 if (Bits == 0)
5303 return ABIArgInfo::getIgnore();
5304
5305 llvm::Type *CoerceTy;
5306 if (Bits > GPRBits) {
5307 CoerceTy = llvm::IntegerType::get(getVMContext(), GPRBits);
5308 CoerceTy = llvm::StructType::get(CoerceTy, CoerceTy);
5309 } else
5310 CoerceTy =
5311 llvm::IntegerType::get(getVMContext(), llvm::alignTo(Bits, 8));
5312 return ABIArgInfo::getDirect(CoerceTy);
5313 }
5314
5315 // All other aggregates are returned indirectly.
5316 return getNaturalAlignIndirect(RetTy);
5317 }
5318
5319 return (isPromotableTypeForABI(RetTy) ? ABIArgInfo::getExtend(RetTy)
5320 : ABIArgInfo::getDirect());
5321 }
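// Return sketch (ELFv2, hypothetical structs): a 4-byte struct comes back
// as i32 in r3, a 12-byte struct as { i64, i64 } in r3:r4, and anything
// larger than 16 bytes is returned through an sret pointer.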
5322
5323 // Based on ARMABIInfo::EmitVAArg, adjusted for 64-bit machine.
5324 Address PPC64_SVR4_ABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
5325 QualType Ty) const {
5326 auto TypeInfo = getContext().getTypeInfoInChars(Ty);
5327 TypeInfo.Align = getParamTypeAlignment(Ty);
5328
5329 CharUnits SlotSize = CharUnits::fromQuantity(8);
5330
5331 // If we have a complex type and the base type is smaller than 8 bytes,
5332 // the ABI calls for the real and imaginary parts to be right-adjusted
5333 // in separate doublewords. However, Clang expects us to produce a
5334 // pointer to a structure with the two parts packed tightly. So generate
5335 // loads of the real and imaginary parts relative to the va_list pointer,
5336 // and store them to a temporary structure.
5337 if (const ComplexType *CTy = Ty->getAs<ComplexType>()) {
5338 CharUnits EltSize = TypeInfo.Width / 2;
5339 if (EltSize < SlotSize) {
5340 Address Addr = emitVoidPtrDirectVAArg(CGF, VAListAddr, CGF.Int8Ty,
5341 SlotSize * 2, SlotSize,
5342 SlotSize, /*AllowHigher*/ true);
5343
5344 Address RealAddr = Addr;
5345 Address ImagAddr = RealAddr;
5346 if (CGF.CGM.getDataLayout().isBigEndian()) {
5347 RealAddr = CGF.Builder.CreateConstInBoundsByteGEP(RealAddr,
5348 SlotSize - EltSize);
5349 ImagAddr = CGF.Builder.CreateConstInBoundsByteGEP(ImagAddr,
5350 2 * SlotSize - EltSize);
5351 } else {
5352 ImagAddr = CGF.Builder.CreateConstInBoundsByteGEP(RealAddr, SlotSize);
5353 }
5354
5355 llvm::Type *EltTy = CGF.ConvertTypeForMem(CTy->getElementType());
5356 RealAddr = CGF.Builder.CreateElementBitCast(RealAddr, EltTy);
5357 ImagAddr = CGF.Builder.CreateElementBitCast(ImagAddr, EltTy);
5358 llvm::Value *Real = CGF.Builder.CreateLoad(RealAddr, ".vareal");
5359 llvm::Value *Imag = CGF.Builder.CreateLoad(ImagAddr, ".vaimag");
5360
5361 Address Temp = CGF.CreateMemTemp(Ty, "vacplx");
5362 CGF.EmitStoreOfComplex({Real, Imag}, CGF.MakeAddrLValue(Temp, Ty),
5363 /*init*/ true);
5364 return Temp;
5365 }
5366 }
5367
5368 // Otherwise, just use the general rule.
5369 return emitVoidPtrVAArg(CGF, VAListAddr, Ty, /*Indirect*/ false,
5370 TypeInfo, SlotSize, /*AllowHigher*/ true);
5371 }
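// Layout sketch for the complex path above (big-endian, a hypothetical
// 'float _Complex'): each 4-byte part sits right-adjusted in its own 8-byte
// slot, so the real part is loaded from offset 4 and the imaginary part
// from offset 12 before both are packed into the temporary.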
5372
5373 bool
5374 PPC64_SVR4_TargetCodeGenInfo::initDwarfEHRegSizeTable(
5375 CodeGen::CodeGenFunction &CGF,
5376 llvm::Value *Address) const {
5377 return PPC_initDwarfEHRegSizeTable(CGF, Address, /*Is64Bit*/ true,
5378 /*IsAIX*/ false);
5379 }
5380
5381 bool
5382 PPC64TargetCodeGenInfo::initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
5383 llvm::Value *Address) const {
5384 return PPC_initDwarfEHRegSizeTable(CGF, Address, /*Is64Bit*/ true,
5385 /*IsAIX*/ false);
5386 }
5387
5388 //===----------------------------------------------------------------------===//
5389 // AArch64 ABI Implementation
5390 //===----------------------------------------------------------------------===//
5391
5392 namespace {
5393
5394 class AArch64ABIInfo : public SwiftABIInfo {
5395 public:
5396 enum ABIKind {
5397 AAPCS = 0,
5398 DarwinPCS,
5399 Win64
5400 };
5401
5402 private:
5403 ABIKind Kind;
5404
5405 public:
5406   AArch64ABIInfo(CodeGenTypes &CGT, ABIKind Kind)
5407 : SwiftABIInfo(CGT), Kind(Kind) {}
5408
5409 private:
5410   ABIKind getABIKind() const { return Kind; }
5411   bool isDarwinPCS() const { return Kind == DarwinPCS; }
5412
5413 ABIArgInfo classifyReturnType(QualType RetTy, bool IsVariadic) const;
5414 ABIArgInfo classifyArgumentType(QualType RetTy) const;
5415 ABIArgInfo coerceIllegalVector(QualType Ty) const;
5416 bool isHomogeneousAggregateBaseType(QualType Ty) const override;
5417 bool isHomogeneousAggregateSmallEnough(const Type *Ty,
5418 uint64_t Members) const override;
5419
5420 bool isIllegalVectorType(QualType Ty) const;
5421
5422   void computeInfo(CGFunctionInfo &FI) const override {
5423 if (!::classifyReturnType(getCXXABI(), FI, *this))
5424 FI.getReturnInfo() =
5425 classifyReturnType(FI.getReturnType(), FI.isVariadic());
5426
5427 for (auto &it : FI.arguments())
5428 it.info = classifyArgumentType(it.type);
5429 }
5430
5431 Address EmitDarwinVAArg(Address VAListAddr, QualType Ty,
5432 CodeGenFunction &CGF) const;
5433
5434 Address EmitAAPCSVAArg(Address VAListAddr, QualType Ty,
5435 CodeGenFunction &CGF) const;
5436
5437   Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
5438 QualType Ty) const override {
5439 llvm::Type *BaseTy = CGF.ConvertType(Ty);
5440 if (isa<llvm::ScalableVectorType>(BaseTy))
5441 llvm::report_fatal_error("Passing SVE types to variadic functions is "
5442 "currently not supported");
5443
5444 return Kind == Win64 ? EmitMSVAArg(CGF, VAListAddr, Ty)
5445 : isDarwinPCS() ? EmitDarwinVAArg(VAListAddr, Ty, CGF)
5446 : EmitAAPCSVAArg(VAListAddr, Ty, CGF);
5447 }
5448
5449 Address EmitMSVAArg(CodeGenFunction &CGF, Address VAListAddr,
5450 QualType Ty) const override;
5451
5452   bool shouldPassIndirectlyForSwift(ArrayRef<llvm::Type*> scalars,
5453 bool asReturnValue) const override {
5454 return occupiesMoreThan(CGT, scalars, /*total*/ 4);
5455 }
5456   bool isSwiftErrorInRegister() const override {
5457 return true;
5458 }
5459
5460 bool isLegalVectorTypeForSwift(CharUnits totalSize, llvm::Type *eltTy,
5461 unsigned elts) const override;
5462
5463   bool allowBFloatArgsAndRet() const override {
5464 return getTarget().hasBFloat16Type();
5465 }
5466 };
5467
5468 class AArch64TargetCodeGenInfo : public TargetCodeGenInfo {
5469 public:
5470   AArch64TargetCodeGenInfo(CodeGenTypes &CGT, AArch64ABIInfo::ABIKind Kind)
5471 : TargetCodeGenInfo(std::make_unique<AArch64ABIInfo>(CGT, Kind)) {}
5472
5473   StringRef getARCRetainAutoreleasedReturnValueMarker() const override {
5474 return "mov\tfp, fp\t\t// marker for objc_retainAutoreleaseReturnValue";
5475 }
5476
5477   int getDwarfEHStackPointer(CodeGen::CodeGenModule &M) const override {
5478 return 31;
5479 }
5480
5481   bool doesReturnSlotInterfereWithArgs() const override { return false; }
5482
5483   void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
5484 CodeGen::CodeGenModule &CGM) const override {
5485 const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D);
5486 if (!FD)
5487 return;
5488
5489 const auto *TA = FD->getAttr<TargetAttr>();
5490 if (TA == nullptr)
5491 return;
5492
5493 ParsedTargetAttr Attr = TA->parse();
5494 if (Attr.BranchProtection.empty())
5495 return;
5496
5497 TargetInfo::BranchProtectionInfo BPI;
5498 StringRef Error;
5499 (void)CGM.getTarget().validateBranchProtection(Attr.BranchProtection,
5500 BPI, Error);
5501 assert(Error.empty());
5502
5503 auto *Fn = cast<llvm::Function>(GV);
5504 static const char *SignReturnAddrStr[] = {"none", "non-leaf", "all"};
5505 Fn->addFnAttr("sign-return-address", SignReturnAddrStr[static_cast<int>(BPI.SignReturnAddr)]);
5506
5507 if (BPI.SignReturnAddr != LangOptions::SignReturnAddressScopeKind::None) {
5508 Fn->addFnAttr("sign-return-address-key",
5509 BPI.SignKey == LangOptions::SignReturnAddressKeyKind::AKey
5510 ? "a_key"
5511 : "b_key");
5512 }
5513
5514 Fn->addFnAttr("branch-target-enforcement",
5515 BPI.BranchTargetEnforcement ? "true" : "false");
5516 }
5517 };
5518
5519 class WindowsAArch64TargetCodeGenInfo : public AArch64TargetCodeGenInfo {
5520 public:
5521   WindowsAArch64TargetCodeGenInfo(CodeGenTypes &CGT, AArch64ABIInfo::ABIKind K)
5522 : AArch64TargetCodeGenInfo(CGT, K) {}
5523
5524 void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
5525 CodeGen::CodeGenModule &CGM) const override;
5526
5527   void getDependentLibraryOption(llvm::StringRef Lib,
5528 llvm::SmallString<24> &Opt) const override {
5529 Opt = "/DEFAULTLIB:" + qualifyWindowsLibrary(Lib);
5530 }
5531
5532   void getDetectMismatchOption(llvm::StringRef Name, llvm::StringRef Value,
5533 llvm::SmallString<32> &Opt) const override {
5534 Opt = "/FAILIFMISMATCH:\"" + Name.str() + "=" + Value.str() + "\"";
5535 }
5536 };
5537
5538 void WindowsAArch64TargetCodeGenInfo::setTargetAttributes(
5539 const Decl *D, llvm::GlobalValue *GV, CodeGen::CodeGenModule &CGM) const {
5540 AArch64TargetCodeGenInfo::setTargetAttributes(D, GV, CGM);
5541 if (GV->isDeclaration())
5542 return;
5543 addStackProbeTargetAttributes(D, GV, CGM);
5544 }
5545 } // namespace
5546
5547 ABIArgInfo AArch64ABIInfo::coerceIllegalVector(QualType Ty) const {
5548 assert(Ty->isVectorType() && "expected vector type!");
5549
5550 const auto *VT = Ty->castAs<VectorType>();
5551 if (VT->getVectorKind() == VectorType::SveFixedLengthPredicateVector) {
5552 assert(VT->getElementType()->isBuiltinType() && "expected builtin type!");
5553 assert(VT->getElementType()->castAs<BuiltinType>()->getKind() ==
5554 BuiltinType::UChar &&
5555 "unexpected builtin type for SVE predicate!");
5556 return ABIArgInfo::getDirect(llvm::ScalableVectorType::get(
5557 llvm::Type::getInt1Ty(getVMContext()), 16));
5558 }
5559
5560 if (VT->getVectorKind() == VectorType::SveFixedLengthDataVector) {
5561 assert(VT->getElementType()->isBuiltinType() && "expected builtin type!");
5562
5563 const auto *BT = VT->getElementType()->castAs<BuiltinType>();
5564 llvm::ScalableVectorType *ResType = nullptr;
5565 switch (BT->getKind()) {
5566 default:
5567 llvm_unreachable("unexpected builtin type for SVE vector!");
5568 case BuiltinType::SChar:
5569 case BuiltinType::UChar:
5570 ResType = llvm::ScalableVectorType::get(
5571 llvm::Type::getInt8Ty(getVMContext()), 16);
5572 break;
5573 case BuiltinType::Short:
5574 case BuiltinType::UShort:
5575 ResType = llvm::ScalableVectorType::get(
5576 llvm::Type::getInt16Ty(getVMContext()), 8);
5577 break;
5578 case BuiltinType::Int:
5579 case BuiltinType::UInt:
5580 ResType = llvm::ScalableVectorType::get(
5581 llvm::Type::getInt32Ty(getVMContext()), 4);
5582 break;
5583 case BuiltinType::Long:
5584 case BuiltinType::ULong:
5585 ResType = llvm::ScalableVectorType::get(
5586 llvm::Type::getInt64Ty(getVMContext()), 2);
5587 break;
5588 case BuiltinType::Half:
5589 ResType = llvm::ScalableVectorType::get(
5590 llvm::Type::getHalfTy(getVMContext()), 8);
5591 break;
5592 case BuiltinType::Float:
5593 ResType = llvm::ScalableVectorType::get(
5594 llvm::Type::getFloatTy(getVMContext()), 4);
5595 break;
5596 case BuiltinType::Double:
5597 ResType = llvm::ScalableVectorType::get(
5598 llvm::Type::getDoubleTy(getVMContext()), 2);
5599 break;
5600 case BuiltinType::BFloat16:
5601 ResType = llvm::ScalableVectorType::get(
5602 llvm::Type::getBFloatTy(getVMContext()), 8);
5603 break;
5604 }
5605 return ABIArgInfo::getDirect(ResType);
5606 }
5607
5608 uint64_t Size = getContext().getTypeSize(Ty);
5609 // Android promotes <2 x i8> to i16, not i32
5610 if (isAndroid() && (Size <= 16)) {
5611 llvm::Type *ResType = llvm::Type::getInt16Ty(getVMContext());
5612 return ABIArgInfo::getDirect(ResType);
5613 }
5614 if (Size <= 32) {
5615 llvm::Type *ResType = llvm::Type::getInt32Ty(getVMContext());
5616 return ABIArgInfo::getDirect(ResType);
5617 }
5618 if (Size == 64) {
5619 auto *ResType =
5620 llvm::FixedVectorType::get(llvm::Type::getInt32Ty(getVMContext()), 2);
5621 return ABIArgInfo::getDirect(ResType);
5622 }
5623 if (Size == 128) {
5624 auto *ResType =
5625 llvm::FixedVectorType::get(llvm::Type::getInt32Ty(getVMContext()), 4);
5626 return ABIArgInfo::getDirect(ResType);
5627 }
5628 return getNaturalAlignIndirect(Ty, /*ByVal=*/false);
5629 }
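// Size-bucket sketch (hypothetical ext_vector typedefs):
//   typedef char  char2  __attribute__((ext_vector_type(2)));
//     // 16 bits -> i16 on Android, i32 elsewhere
//   typedef float float3 __attribute__((ext_vector_type(3)));
//     // padded to 128 bits -> <4 x i32>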
5630
5631 ABIArgInfo AArch64ABIInfo::classifyArgumentType(QualType Ty) const {
5632 Ty = useFirstFieldIfTransparentUnion(Ty);
5633
5634 // Handle illegal vector types here.
5635 if (isIllegalVectorType(Ty))
5636 return coerceIllegalVector(Ty);
5637
5638 if (!isAggregateTypeForABI(Ty)) {
5639 // Treat an enum type as its underlying type.
5640 if (const EnumType *EnumTy = Ty->getAs<EnumType>())
5641 Ty = EnumTy->getDecl()->getIntegerType();
5642
5643 if (const auto *EIT = Ty->getAs<ExtIntType>())
5644 if (EIT->getNumBits() > 128)
5645 return getNaturalAlignIndirect(Ty);
5646
5647 return (isPromotableIntegerTypeForABI(Ty) && isDarwinPCS()
5648 ? ABIArgInfo::getExtend(Ty)
5649 : ABIArgInfo::getDirect());
5650 }
5651
5652 // Structures with either a non-trivial destructor or a non-trivial
5653 // copy constructor are always indirect.
5654 if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, getCXXABI())) {
5655 return getNaturalAlignIndirect(Ty, /*ByVal=*/RAA ==
5656 CGCXXABI::RAA_DirectInMemory);
5657 }
5658
5659 // Empty records are always ignored on Darwin, but actually passed in C++ mode
5660 // elsewhere for GNU compatibility.
5661 uint64_t Size = getContext().getTypeSize(Ty);
5662 bool IsEmpty = isEmptyRecord(getContext(), Ty, true);
5663 if (IsEmpty || Size == 0) {
5664 if (!getContext().getLangOpts().CPlusPlus || isDarwinPCS())
5665 return ABIArgInfo::getIgnore();
5666
5667 // GNU C++ mode (the non-C++ case returned above): the only argument that
5668 // gets ignored is an empty one with size 0.
5669 if (IsEmpty && Size == 0)
5670 return ABIArgInfo::getIgnore();
5671 return ABIArgInfo::getDirect(llvm::Type::getInt8Ty(getVMContext()));
5672 }
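// For example, in C++ on a non-Darwin target 'struct Empty {};' (size 1,
// empty) is passed as an i8 for GNU compatibility, while on Darwin or in C
// it is ignored entirely.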
5673
5674 // Homogeneous Floating-point Aggregates (HFAs) need to be expanded.
5675 const Type *Base = nullptr;
5676 uint64_t Members = 0;
5677 if (isHomogeneousAggregate(Ty, Base, Members)) {
5678 return ABIArgInfo::getDirect(
5679 llvm::ArrayType::get(CGT.ConvertType(QualType(Base, 0)), Members));
5680 }
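// For example, 'struct Quad { double a, b, c, d; };' is an HFA with
// Base = double and Members = 4, so it is lowered as [4 x double] and, when
// registers are available, lands in d0-d3.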
5681
5682 // Aggregates <= 16 bytes are passed directly in registers or on the stack.
5683 if (Size <= 128) {
5684 // On RenderScript, coerce Aggregates <= 16 bytes to an integer array of the
5685 // same size and alignment.
5686 if (getTarget().isRenderScriptTarget()) {
5687 return coerceToIntArray(Ty, getContext(), getVMContext());
5688 }
5689 unsigned Alignment;
5690 if (Kind == AArch64ABIInfo::AAPCS) {
5691 Alignment = getContext().getTypeUnadjustedAlign(Ty);
5692 Alignment = Alignment < 128 ? 64 : 128;
5693 } else {
5694 Alignment = std::max(getContext().getTypeAlign(Ty),
5695 (unsigned)getTarget().getPointerWidth(0));
5696 }
5697 Size = llvm::alignTo(Size, Alignment);
5698
5699 // We use a pair of i64 for a 16-byte aggregate with 8-byte alignment.
5700 // For aggregates with 16-byte alignment, we use i128.
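// For example, 'struct S { int a; char b; };' (5 bytes, 4-byte aligned)
// rounds up to a single i64, while a 16-byte aggregate becomes [2 x i64]
// when 8-byte aligned or a single i128 when 16-byte aligned.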
5701 llvm::Type *BaseTy = llvm::Type::getIntNTy(getVMContext(), Alignment);
5702 return ABIArgInfo::getDirect(
5703 Size == Alignment ? BaseTy
5704 : llvm::ArrayType::get(BaseTy, Size / Alignment));
5705 }
5706
5707 return getNaturalAlignIndirect(Ty, /*ByVal=*/false);
5708 }
5709
5710 ABIArgInfo AArch64ABIInfo::classifyReturnType(QualType RetTy,
5711 bool IsVariadic) const {
5712 if (RetTy->isVoidType())
5713 return ABIArgInfo::getIgnore();
5714
5715 if (const auto *VT = RetTy->getAs<VectorType>()) {
5716 if (VT->getVectorKind() == VectorType::SveFixedLengthDataVector ||
5717 VT->getVectorKind() == VectorType::SveFixedLengthPredicateVector)
5718 return coerceIllegalVector(RetTy);
5719 }
5720
5721 // Large vector types should be returned via memory.
5722 if (RetTy->isVectorType() && getContext().getTypeSize(RetTy) > 128)
5723 return getNaturalAlignIndirect(RetTy);
5724
5725 if (!isAggregateTypeForABI(RetTy)) {
5726 // Treat an enum type as its underlying type.
5727 if (const EnumType *EnumTy = RetTy->getAs<EnumType>())
5728 RetTy = EnumTy->getDecl()->getIntegerType();
5729
5730 if (const auto *EIT = RetTy->getAs<ExtIntType>())
5731 if (EIT->getNumBits() > 128)
5732 return getNaturalAlignIndirect(RetTy);
5733
5734 return (isPromotableIntegerTypeForABI(RetTy) && isDarwinPCS()
5735 ? ABIArgInfo::getExtend(RetTy)
5736 : ABIArgInfo::getDirect());
5737 }
5738
5739 uint64_t Size = getContext().getTypeSize(RetTy);
5740 if (isEmptyRecord(getContext(), RetTy, true) || Size == 0)
5741 return ABIArgInfo::getIgnore();
5742
5743 const Type *Base = nullptr;
5744 uint64_t Members = 0;
5745 if (isHomogeneousAggregate(RetTy, Base, Members) &&
5746 !(getTarget().getTriple().getArch() == llvm::Triple::aarch64_32 &&
5747 IsVariadic))
5748 // Homogeneous Floating-point Aggregates (HFAs) are returned directly.
5749 return ABIArgInfo::getDirect();
5750
5751 // Aggregates <= 16 bytes are returned directly in registers or on the stack.
5752 if (Size <= 128) {
5753 // On RenderScript, coerce Aggregates <= 16 bytes to an integer array of the
5754 // same size and alignment.
5755 if (getTarget().isRenderScriptTarget()) {
5756 return coerceToIntArray(RetTy, getContext(), getVMContext());
5757 }
5758 unsigned Alignment = getContext().getTypeAlign(RetTy);
5759 Size = llvm::alignTo(Size, 64); // round up to multiple of 8 bytes
5760
5761 // We use a pair of i64 for a 16-byte aggregate with 8-byte alignment.
5762 // For aggregates with 16-byte alignment, we use i128.
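// For example, a 12-byte 'struct RGB { int r, g, b; };' rounds up to 128
// bits and is returned as [2 x i64], while an 8-byte record is returned as
// a single i64.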
5763 if (Alignment < 128 && Size == 128) {
5764 llvm::Type *BaseTy = llvm::Type::getInt64Ty(getVMContext());
5765 return ABIArgInfo::getDirect(llvm::ArrayType::get(BaseTy, Size / 64));
5766 }
5767 return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(), Size));
5768 }
5769
5770 return getNaturalAlignIndirect(RetTy);
5771 }
5772
5773 /// isIllegalVectorType - check whether the vector type is illegal for AArch64.
5774 bool AArch64ABIInfo::isIllegalVectorType(QualType Ty) const {
5775 if (const VectorType *VT = Ty->getAs<VectorType>()) {
5776 // Check whether VT is a fixed-length SVE vector. These types are
5777 // represented as scalable vectors in function args/return and must be
5778 // coerced from fixed vectors.
5779 if (VT->getVectorKind() == VectorType::SveFixedLengthDataVector ||
5780 VT->getVectorKind() == VectorType::SveFixedLengthPredicateVector)
5781 return true;
5782
5783 // Check whether VT is legal.
5784 unsigned NumElements = VT->getNumElements();
5785 uint64_t Size = getContext().getTypeSize(VT);
5786 // NumElements should be a power of 2.
5787 if (!llvm::isPowerOf2_32(NumElements))
5788 return true;
5789
5790 // arm64_32 has to be compatible with the ARM logic here, which allows huge
5791 // vectors for some reason.
5792 llvm::Triple Triple = getTarget().getTriple();
5793 if (Triple.getArch() == llvm::Triple::aarch64_32 &&
5794 Triple.isOSBinFormatMachO())
5795 return Size <= 32;
5796
5797 return Size != 64 && (Size != 128 || NumElements == 1);
5798 }
5799 return false;
5800 }
5801
5802 bool AArch64ABIInfo::isLegalVectorTypeForSwift(CharUnits totalSize,
5803 llvm::Type *eltTy,
5804 unsigned elts) const {
5805 if (!llvm::isPowerOf2_32(elts))
5806 return false;
5807 if (totalSize.getQuantity() != 8 &&
5808 (totalSize.getQuantity() != 16 || elts == 1))
5809 return false;
5810 return true;
5811 }
5812
5813 bool AArch64ABIInfo::isHomogeneousAggregateBaseType(QualType Ty) const {
5814 // Homogeneous aggregates for AAPCS64 must have base types of a floating
5815 // point type or a short-vector type. This is the same as the 32-bit ABI,
5816 // but with the difference that any floating-point type is allowed,
5817 // including __fp16.
5818 if (const BuiltinType *BT = Ty->getAs<BuiltinType>()) {
5819 if (BT->isFloatingPoint())
5820 return true;
5821 } else if (const VectorType *VT = Ty->getAs<VectorType>()) {
5822 unsigned VecSize = getContext().getTypeSize(VT);
5823 if (VecSize == 64 || VecSize == 128)
5824 return true;
5825 }
5826 return false;
5827 }
5828
5829 bool AArch64ABIInfo::isHomogeneousAggregateSmallEnough(const Type *Base,
5830 uint64_t Members) const {
5831 return Members <= 4;
5832 }
5833
5834 Address AArch64ABIInfo::EmitAAPCSVAArg(Address VAListAddr,
5835 QualType Ty,
5836 CodeGenFunction &CGF) const {
5837 ABIArgInfo AI = classifyArgumentType(Ty);
5838 bool IsIndirect = AI.isIndirect();
5839
5840 llvm::Type *BaseTy = CGF.ConvertType(Ty);
5841 if (IsIndirect)
5842 BaseTy = llvm::PointerType::getUnqual(BaseTy);
5843 else if (AI.getCoerceToType())
5844 BaseTy = AI.getCoerceToType();
5845
5846 unsigned NumRegs = 1;
5847 if (llvm::ArrayType *ArrTy = dyn_cast<llvm::ArrayType>(BaseTy)) {
5848 BaseTy = ArrTy->getElementType();
5849 NumRegs = ArrTy->getNumElements();
5850 }
5851 bool IsFPR = BaseTy->isFloatingPointTy() || BaseTy->isVectorTy();
5852
5853 // The AArch64 va_list type and its handling are specified in the Procedure
5854 // Call Standard, section B.4:
5855 //
5856 // struct {
5857 // void *__stack;
5858 // void *__gr_top;
5859 // void *__vr_top;
5860 // int __gr_offs;
5861 // int __vr_offs;
5862 // };
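//
// __gr_offs and __vr_offs hold the (negative) byte offset from __gr_top and
// __vr_top back to the next unallocated register save slot; once an offset
// reaches zero or above, that register class is exhausted and arguments live
// on __stack. As an illustration (not from the PCS text): if one GPR is
// consumed by named arguments, va_start leaves __gr_offs at -56, i.e. seven
// 8-byte slots remain.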
5863
5864 llvm::BasicBlock *MaybeRegBlock = CGF.createBasicBlock("vaarg.maybe_reg");
5865 llvm::BasicBlock *InRegBlock = CGF.createBasicBlock("vaarg.in_reg");
5866 llvm::BasicBlock *OnStackBlock = CGF.createBasicBlock("vaarg.on_stack");
5867 llvm::BasicBlock *ContBlock = CGF.createBasicBlock("vaarg.end");
5868
5869 CharUnits TySize = getContext().getTypeSizeInChars(Ty);
5870 CharUnits TyAlign = getContext().getTypeUnadjustedAlignInChars(Ty);
5871
5872 Address reg_offs_p = Address::invalid();
5873 llvm::Value *reg_offs = nullptr;
5874 int reg_top_index;
5875 int RegSize = IsIndirect ? 8 : TySize.getQuantity();
5876 if (!IsFPR) {
5877 // 3 is the field number of __gr_offs
5878 reg_offs_p = CGF.Builder.CreateStructGEP(VAListAddr, 3, "gr_offs_p");
5879 reg_offs = CGF.Builder.CreateLoad(reg_offs_p, "gr_offs");
5880 reg_top_index = 1; // field number for __gr_top
5881 RegSize = llvm::alignTo(RegSize, 8);
5882 } else {
5883 // 4 is the field number of __vr_offs.
5884 reg_offs_p = CGF.Builder.CreateStructGEP(VAListAddr, 4, "vr_offs_p");
5885 reg_offs = CGF.Builder.CreateLoad(reg_offs_p, "vr_offs");
5886 reg_top_index = 2; // field number for __vr_top
5887 RegSize = 16 * NumRegs;
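// Each FPR save slot is 16 bytes regardless of element size, so e.g. an HFA
// of three floats coerced to [3 x float] consumes 48 bytes of the __vr_top
// save area even though it carries only 12 bytes of data.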
5888 }
5889
5890 //=======================================
5891 // Find out where argument was passed
5892 //=======================================
5893
5894 // If reg_offs >= 0 we're already using the stack for this type of
5895 // argument. We don't want to keep updating reg_offs (in case it overflows,
5896 // though anyone passing 2GB of arguments, each at most 16 bytes, deserves
5897 // whatever they get).
5898 llvm::Value *UsingStack = nullptr;
5899 UsingStack = CGF.Builder.CreateICmpSGE(
5900 reg_offs, llvm::ConstantInt::get(CGF.Int32Ty, 0));
5901
5902 CGF.Builder.CreateCondBr(UsingStack, OnStackBlock, MaybeRegBlock);
5903
5904 // Otherwise, at least some kind of argument could go in these registers; the
5905 // question is whether this particular type is too big.
5906 CGF.EmitBlock(MaybeRegBlock);
5907
5908 // Integer arguments may need their register alignment corrected (for example
5909 // a "struct { __int128 a; };" gets passed in x_2N, x_{2N+1}). In this case we
5910 // align __gr_offs to calculate the potential address.
5911 if (!IsFPR && !IsIndirect && TyAlign.getQuantity() > 8) {
5912 int Align = TyAlign.getQuantity();
5913
5914 reg_offs = CGF.Builder.CreateAdd(
5915 reg_offs, llvm::ConstantInt::get(CGF.Int32Ty, Align - 1),
5916 "align_regoffs");
5917 reg_offs = CGF.Builder.CreateAnd(
5918 reg_offs, llvm::ConstantInt::get(CGF.Int32Ty, -Align),
5919 "aligned_regoffs");
5920 }
5921
5922 // Update the gr_offs/vr_offs pointer for the next call to va_arg on this va_list.
5923 // The fact that this is done unconditionally reflects the fact that
5924 // allocating an argument to the stack also uses up all the remaining
5925 // registers of the appropriate kind.
5926 llvm::Value *NewOffset = nullptr;
5927 NewOffset = CGF.Builder.CreateAdd(
5928 reg_offs, llvm::ConstantInt::get(CGF.Int32Ty, RegSize), "new_reg_offs");
5929 CGF.Builder.CreateStore(NewOffset, reg_offs_p);
5930
5931 // Now we're in a position to decide whether this argument really was in
5932 // registers or not.
5933 llvm::Value *InRegs = nullptr;
5934 InRegs = CGF.Builder.CreateICmpSLE(
5935 NewOffset, llvm::ConstantInt::get(CGF.Int32Ty, 0), "inreg");
5936
5937 CGF.Builder.CreateCondBr(InRegs, InRegBlock, OnStackBlock);
5938
5939 //=======================================
5940 // Argument was in registers
5941 //=======================================
5942
5943 // Now we emit the code for if the argument was originally passed in
5944 // registers. First start the appropriate block:
5945 CGF.EmitBlock(InRegBlock);
5946
5947 llvm::Value *reg_top = nullptr;
5948 Address reg_top_p =
5949 CGF.Builder.CreateStructGEP(VAListAddr, reg_top_index, "reg_top_p");
5950 reg_top = CGF.Builder.CreateLoad(reg_top_p, "reg_top");
5951 Address BaseAddr(CGF.Builder.CreateInBoundsGEP(reg_top, reg_offs),
5952 CharUnits::fromQuantity(IsFPR ? 16 : 8));
5953 Address RegAddr = Address::invalid();
5954 llvm::Type *MemTy = CGF.ConvertTypeForMem(Ty);
5955
5956 if (IsIndirect) {
5957 // If it's been passed indirectly (really a struct), whatever we find in the
5958 // saved registers or on the stack will actually be a struct **.
5959 MemTy = llvm::PointerType::getUnqual(MemTy);
5960 }
5961
5962 const Type *Base = nullptr;
5963 uint64_t NumMembers = 0;
5964 bool IsHFA = isHomogeneousAggregate(Ty, Base, NumMembers);
5965 if (IsHFA && NumMembers > 1) {
5966 // Homogeneous aggregates passed in registers will have their elements split
5967 // and stored 16 bytes apart regardless of size (they're notionally in qN,
5968 // qN+1, ...). We reload and store into a temporary local variable
5969 // contiguously.
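// For example, an HFA of four floats notionally occupies q0-q3, so its
// elements sit 16 bytes apart (offsets 0, 16, 32, 48) in the save area and
// are repacked here into a contiguous [4 x float] temporary.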
5970 assert(!IsIndirect && "Homogeneous aggregates should be passed directly");
5971 auto BaseTyInfo = getContext().getTypeInfoInChars(QualType(Base, 0));
5972 llvm::Type *BaseTy = CGF.ConvertType(QualType(Base, 0));
5973 llvm::Type *HFATy = llvm::ArrayType::get(BaseTy, NumMembers);
5974 Address Tmp = CGF.CreateTempAlloca(HFATy,
5975 std::max(TyAlign, BaseTyInfo.Align));
5976
5977 // On big-endian platforms, the value will be right-aligned in its slot.
5978 int Offset = 0;
5979 if (CGF.CGM.getDataLayout().isBigEndian() &&
5980 BaseTyInfo.Width.getQuantity() < 16)
5981 Offset = 16 - BaseTyInfo.Width.getQuantity();
5982
5983 for (unsigned i = 0; i < NumMembers; ++i) {
5984 CharUnits BaseOffset = CharUnits::fromQuantity(16 * i + Offset);
5985 Address LoadAddr =
5986 CGF.Builder.CreateConstInBoundsByteGEP(BaseAddr, BaseOffset);
5987 LoadAddr = CGF.Builder.CreateElementBitCast(LoadAddr, BaseTy);
5988
5989 Address StoreAddr = CGF.Builder.CreateConstArrayGEP(Tmp, i);
5990
5991 llvm::Value *Elem = CGF.Builder.CreateLoad(LoadAddr);
5992 CGF.Builder.CreateStore(Elem, StoreAddr);
5993 }
5994
5995 RegAddr = CGF.Builder.CreateElementBitCast(Tmp, MemTy);
5996 } else {
5997 // Otherwise the object is contiguous in memory.
5998
5999 // It might be right-aligned in its slot.
6000 CharUnits SlotSize = BaseAddr.getAlignment();
6001 if (CGF.CGM.getDataLayout().isBigEndian() && !IsIndirect &&
6002 (IsHFA || !isAggregateTypeForABI(Ty)) &&
6003 TySize < SlotSize) {
6004 CharUnits Offset = SlotSize - TySize;
6005 BaseAddr = CGF.Builder.CreateConstInBoundsByteGEP(BaseAddr, Offset);
6006 }
6007
6008 RegAddr = CGF.Builder.CreateElementBitCast(BaseAddr, MemTy);
6009 }
6010
6011 CGF.EmitBranch(ContBlock);
6012
6013 //=======================================
6014 // Argument was on the stack
6015 //=======================================
6016 CGF.EmitBlock(OnStackBlock);
6017
6018 Address stack_p = CGF.Builder.CreateStructGEP(VAListAddr, 0, "stack_p");
6019 llvm::Value *OnStackPtr = CGF.Builder.CreateLoad(stack_p, "stack");
6020
6021 // Again, stack arguments may need realignment. In this case both integer and
6022 // floating-point ones might be affected.
6023 if (!IsIndirect && TyAlign.getQuantity() > 8) {
6024 int Align = TyAlign.getQuantity();
6025
6026 OnStackPtr = CGF.Builder.CreatePtrToInt(OnStackPtr, CGF.Int64Ty);
6027
6028 OnStackPtr = CGF.Builder.CreateAdd(
6029 OnStackPtr, llvm::ConstantInt::get(CGF.Int64Ty, Align - 1),
6030 "align_stack");
6031 OnStackPtr = CGF.Builder.CreateAnd(
6032 OnStackPtr, llvm::ConstantInt::get(CGF.Int64Ty, -Align),
6033 "align_stack");
6034
6035 OnStackPtr = CGF.Builder.CreateIntToPtr(OnStackPtr, CGF.Int8PtrTy);
6036 }
6037 Address OnStackAddr(OnStackPtr,
6038 std::max(CharUnits::fromQuantity(8), TyAlign));
6039
6040 // All stack slots are multiples of 8 bytes.
6041 CharUnits StackSlotSize = CharUnits::fromQuantity(8);
6042 CharUnits StackSize;
6043 if (IsIndirect)
6044 StackSize = StackSlotSize;
6045 else
6046 StackSize = TySize.alignTo(StackSlotSize);
6047
6048 llvm::Value *StackSizeC = CGF.Builder.getSize(StackSize);
6049 llvm::Value *NewStack =
6050 CGF.Builder.CreateInBoundsGEP(OnStackPtr, StackSizeC, "new_stack");
6051
6052 // Write the new value of __stack for the next call to va_arg
6053 CGF.Builder.CreateStore(NewStack, stack_p);
6054
6055 if (CGF.CGM.getDataLayout().isBigEndian() && !isAggregateTypeForABI(Ty) &&
6056 TySize < StackSlotSize) {
6057 CharUnits Offset = StackSlotSize - TySize;
6058 OnStackAddr = CGF.Builder.CreateConstInBoundsByteGEP(OnStackAddr, Offset);
6059 }
6060
6061 OnStackAddr = CGF.Builder.CreateElementBitCast(OnStackAddr, MemTy);
6062
6063 CGF.EmitBranch(ContBlock);
6064
6065 //=======================================
6066 // Tidy up
6067 //=======================================
6068 CGF.EmitBlock(ContBlock);
6069
6070 Address ResAddr = emitMergePHI(CGF, RegAddr, InRegBlock,
6071 OnStackAddr, OnStackBlock, "vaargs.addr");
6072
6073 if (IsIndirect)
6074 return Address(CGF.Builder.CreateLoad(ResAddr, "vaarg.addr"),
6075 TyAlign);
6076
6077 return ResAddr;
6078 }
6079
6080 Address AArch64ABIInfo::EmitDarwinVAArg(Address VAListAddr, QualType Ty,
6081 CodeGenFunction &CGF) const {
6082 // The backend's lowering doesn't support va_arg for aggregates or
6083 // illegal vector types. Lower VAArg here for these cases and use
6084 // the LLVM va_arg instruction for everything else.
6085 if (!isAggregateTypeForABI(Ty) && !isIllegalVectorType(Ty))
6086 return EmitVAArgInstr(CGF, VAListAddr, Ty, ABIArgInfo::getDirect());
6087
6088 uint64_t PointerSize = getTarget().getPointerWidth(0) / 8;
6089 CharUnits SlotSize = CharUnits::fromQuantity(PointerSize);
6090
6091 // Empty records are ignored for parameter passing purposes.
6092 if (isEmptyRecord(getContext(), Ty, true)) {
6093 Address Addr(CGF.Builder.CreateLoad(VAListAddr, "ap.cur"), SlotSize);
6094 Addr = CGF.Builder.CreateElementBitCast(Addr, CGF.ConvertTypeForMem(Ty));
6095 return Addr;
6096 }
6097
6098 // The size of the actual thing passed, which might end up just
6099 // being a pointer for indirect types.
6100 auto TyInfo = getContext().getTypeInfoInChars(Ty);
6101
6102 // Arguments bigger than 16 bytes which aren't homogeneous
6103 // aggregates should be passed indirectly.
6104 bool IsIndirect = false;
6105 if (TyInfo.Width.getQuantity() > 16) {
6106 const Type *Base = nullptr;
6107 uint64_t Members = 0;
6108 IsIndirect = !isHomogeneousAggregate(Ty, Base, Members);
6109 }
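// For example, a 24-byte struct of six ints is passed indirectly, so the
// va_list slot holds just a pointer to it; a 24-byte HFA of three doubles
// stays direct and occupies the slots themselves.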
6110
6111 return emitVoidPtrVAArg(CGF, VAListAddr, Ty, IsIndirect,
6112 TyInfo, SlotSize, /*AllowHigherAlign*/ true);
6113 }
6114
6115 Address AArch64ABIInfo::EmitMSVAArg(CodeGenFunction &CGF, Address VAListAddr,
6116 QualType Ty) const {
6117 return emitVoidPtrVAArg(CGF, VAListAddr, Ty, /*indirect*/ false,
6118 CGF.getContext().getTypeInfoInChars(Ty),
6119 CharUnits::fromQuantity(8),
6120 /*allowHigherAlign*/ false);
6121 }
6122
6123 //===----------------------------------------------------------------------===//
6124 // ARM ABI Implementation
6125 //===----------------------------------------------------------------------===//
6126
6127 namespace {
6128
6129 class ARMABIInfo : public SwiftABIInfo {
6130 public:
6131 enum ABIKind {
6132 APCS = 0,
6133 AAPCS = 1,
6134 AAPCS_VFP = 2,
6135 AAPCS16_VFP = 3,
6136 };
6137
6138 private:
6139 ABIKind Kind;
6140 bool IsFloatABISoftFP;
6141
6142 public:
6143 ARMABIInfo(CodeGenTypes &CGT, ABIKind _Kind)
6144 : SwiftABIInfo(CGT), Kind(_Kind) {
6145 setCCs();
6146 IsFloatABISoftFP = CGT.getCodeGenOpts().FloatABI == "softfp" ||
6147 CGT.getCodeGenOpts().FloatABI == ""; // default
6148 }
6149
6150 bool isEABI() const {
6151 switch (getTarget().getTriple().getEnvironment()) {
6152 case llvm::Triple::Android:
6153 case llvm::Triple::EABI:
6154 case llvm::Triple::EABIHF:
6155 case llvm::Triple::GNUEABI:
6156 case llvm::Triple::GNUEABIHF:
6157 case llvm::Triple::MuslEABI:
6158 case llvm::Triple::MuslEABIHF:
6159 return true;
6160 default:
6161 return false;
6162 }
6163 }
6164
6165 bool isEABIHF() const {
6166 switch (getTarget().getTriple().getEnvironment()) {
6167 case llvm::Triple::EABIHF:
6168 case llvm::Triple::GNUEABIHF:
6169 case llvm::Triple::MuslEABIHF:
6170 return true;
6171 default:
6172 return false;
6173 }
6174 }
6175
6176 ABIKind getABIKind() const { return Kind; }
6177
6178 bool allowBFloatArgsAndRet() const override {
6179 return !IsFloatABISoftFP && getTarget().hasBFloat16Type();
6180 }
6181
6182 private:
6183 ABIArgInfo classifyReturnType(QualType RetTy, bool isVariadic,
6184 unsigned functionCallConv) const;
6185 ABIArgInfo classifyArgumentType(QualType RetTy, bool isVariadic,
6186 unsigned functionCallConv) const;
6187 ABIArgInfo classifyHomogeneousAggregate(QualType Ty, const Type *Base,
6188 uint64_t Members) const;
6189 ABIArgInfo coerceIllegalVector(QualType Ty) const;
6190 bool isIllegalVectorType(QualType Ty) const;
6191 bool containsAnyFP16Vectors(QualType Ty) const;
6192
6193 bool isHomogeneousAggregateBaseType(QualType Ty) const override;
6194 bool isHomogeneousAggregateSmallEnough(const Type *Ty,
6195 uint64_t Members) const override;
6196
6197 bool isEffectivelyAAPCS_VFP(unsigned callConvention, bool acceptHalf) const;
6198
6199 void computeInfo(CGFunctionInfo &FI) const override;
6200
6201 Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
6202 QualType Ty) const override;
6203
6204 llvm::CallingConv::ID getLLVMDefaultCC() const;
6205 llvm::CallingConv::ID getABIDefaultCC() const;
6206 void setCCs();
6207
6208 bool shouldPassIndirectlyForSwift(ArrayRef<llvm::Type*> scalars,
6209 bool asReturnValue) const override {
6210 return occupiesMoreThan(CGT, scalars, /*total*/ 4);
6211 }
6212 bool isSwiftErrorInRegister() const override {
6213 return true;
6214 }
6215 bool isLegalVectorTypeForSwift(CharUnits totalSize, llvm::Type *eltTy,
6216 unsigned elts) const override;
6217 };
6218
6219 class ARMTargetCodeGenInfo : public TargetCodeGenInfo {
6220 public:
6221 ARMTargetCodeGenInfo(CodeGenTypes &CGT, ARMABIInfo::ABIKind K)
6222 : TargetCodeGenInfo(std::make_unique<ARMABIInfo>(CGT, K)) {}
6223
6224 const ARMABIInfo &getABIInfo() const {
6225 return static_cast<const ARMABIInfo&>(TargetCodeGenInfo::getABIInfo());
6226 }
6227
6228 int getDwarfEHStackPointer(CodeGen::CodeGenModule &M) const override {
6229 return 13;
6230 }
6231
6232 StringRef getARCRetainAutoreleasedReturnValueMarker() const override {
6233 return "mov\tr7, r7\t\t// marker for objc_retainAutoreleaseReturnValue";
6234 }
6235
6236 bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
6237 llvm::Value *Address) const override {
6238 llvm::Value *Four8 = llvm::ConstantInt::get(CGF.Int8Ty, 4);
6239
6240 // 0-15 are the 16 integer registers.
6241 AssignToArrayRange(CGF.Builder, Address, Four8, 0, 15);
6242 return false;
6243 }
6244
6245 unsigned getSizeOfUnwindException() const override {
6246 if (getABIInfo().isEABI()) return 88;
6247 return TargetCodeGenInfo::getSizeOfUnwindException();
6248 }
6249
6250 void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
6251 CodeGen::CodeGenModule &CGM) const override {
6252 if (GV->isDeclaration())
6253 return;
6254 const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D);
6255 if (!FD)
6256 return;
6257
6258 const ARMInterruptAttr *Attr = FD->getAttr<ARMInterruptAttr>();
6259 if (!Attr)
6260 return;
6261
6262 const char *Kind;
6263 switch (Attr->getInterrupt()) {
6264 case ARMInterruptAttr::Generic: Kind = ""; break;
6265 case ARMInterruptAttr::IRQ: Kind = "IRQ"; break;
6266 case ARMInterruptAttr::FIQ: Kind = "FIQ"; break;
6267 case ARMInterruptAttr::SWI: Kind = "SWI"; break;
6268 case ARMInterruptAttr::ABORT: Kind = "ABORT"; break;
6269 case ARMInterruptAttr::UNDEF: Kind = "UNDEF"; break;
6270 }
6271
6272 llvm::Function *Fn = cast<llvm::Function>(GV);
6273
6274 Fn->addFnAttr("interrupt", Kind);
6275
6276 ARMABIInfo::ABIKind ABI = cast<ARMABIInfo>(getABIInfo()).getABIKind();
6277 if (ABI == ARMABIInfo::APCS)
6278 return;
6279
6280 // AAPCS guarantees that sp will be 8-byte aligned on any public interface,
6281 // however this is not necessarily true on taking any interrupt. Instruct
6282 // the backend to perform a realignment as part of the function prologue.
6283 llvm::AttrBuilder B;
6284 B.addStackAlignmentAttr(8);
6285 Fn->addAttributes(llvm::AttributeList::FunctionIndex, B);
6286 }
6287 };
6288
6289 class WindowsARMTargetCodeGenInfo : public ARMTargetCodeGenInfo {
6290 public:
6291 WindowsARMTargetCodeGenInfo(CodeGenTypes &CGT, ARMABIInfo::ABIKind K)
6292 : ARMTargetCodeGenInfo(CGT, K) {}
6293
6294 void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
6295 CodeGen::CodeGenModule &CGM) const override;
6296
6297 void getDependentLibraryOption(llvm::StringRef Lib,
6298 llvm::SmallString<24> &Opt) const override {
6299 Opt = "/DEFAULTLIB:" + qualifyWindowsLibrary(Lib);
6300 }
6301
6302 void getDetectMismatchOption(llvm::StringRef Name, llvm::StringRef Value,
6303 llvm::SmallString<32> &Opt) const override {
6304 Opt = "/FAILIFMISMATCH:\"" + Name.str() + "=" + Value.str() + "\"";
6305 }
6306 };
6307
6308 void WindowsARMTargetCodeGenInfo::setTargetAttributes(
6309 const Decl *D, llvm::GlobalValue *GV, CodeGen::CodeGenModule &CGM) const {
6310 ARMTargetCodeGenInfo::setTargetAttributes(D, GV, CGM);
6311 if (GV->isDeclaration())
6312 return;
6313 addStackProbeTargetAttributes(D, GV, CGM);
6314 }
6315 }
6316
6317 void ARMABIInfo::computeInfo(CGFunctionInfo &FI) const {
6318 if (!::classifyReturnType(getCXXABI(), FI, *this))
6319 FI.getReturnInfo() = classifyReturnType(FI.getReturnType(), FI.isVariadic(),
6320 FI.getCallingConvention());
6321
6322 for (auto &I : FI.arguments())
6323 I.info = classifyArgumentType(I.type, FI.isVariadic(),
6324 FI.getCallingConvention());
6325
6326
6327 // Always honor user-specified calling convention.
6328 if (FI.getCallingConvention() != llvm::CallingConv::C)
6329 return;
6330
6331 llvm::CallingConv::ID cc = getRuntimeCC();
6332 if (cc != llvm::CallingConv::C)
6333 FI.setEffectiveCallingConvention(cc);
6334 }
6335
6336 /// Return the default calling convention that LLVM will use.
6337 llvm::CallingConv::ID ARMABIInfo::getLLVMDefaultCC() const {
6338 // The default calling convention that LLVM will infer.
6339 if (isEABIHF() || getTarget().getTriple().isWatchABI())
6340 return llvm::CallingConv::ARM_AAPCS_VFP;
6341 else if (isEABI())
6342 return llvm::CallingConv::ARM_AAPCS;
6343 else
6344 return llvm::CallingConv::ARM_APCS;
6345 }
6346
6347 /// Return the calling convention that our ABI would like us to use
6348 /// as the C calling convention.
6349 llvm::CallingConv::ID ARMABIInfo::getABIDefaultCC() const {
6350 switch (getABIKind()) {
6351 case APCS: return llvm::CallingConv::ARM_APCS;
6352 case AAPCS: return llvm::CallingConv::ARM_AAPCS;
6353 case AAPCS_VFP: return llvm::CallingConv::ARM_AAPCS_VFP;
6354 case AAPCS16_VFP: return llvm::CallingConv::ARM_AAPCS_VFP;
6355 }
6356 llvm_unreachable("bad ABI kind");
6357 }
6358
6359 void ARMABIInfo::setCCs() {
6360 assert(getRuntimeCC() == llvm::CallingConv::C);
6361
6362 // Don't muddy up the IR with a ton of explicit annotations if
6363 // they'd just match what LLVM will infer from the triple.
6364 llvm::CallingConv::ID abiCC = getABIDefaultCC();
6365 if (abiCC != getLLVMDefaultCC())
6366 RuntimeCC = abiCC;
6367 }
6368
6369 ABIArgInfo ARMABIInfo::coerceIllegalVector(QualType Ty) const {
6370 uint64_t Size = getContext().getTypeSize(Ty);
6371 if (Size <= 32) {
6372 llvm::Type *ResType =
6373 llvm::Type::getInt32Ty(getVMContext());
6374 return ABIArgInfo::getDirect(ResType);
6375 }
6376 if (Size == 64 || Size == 128) {
6377 auto *ResType = llvm::FixedVectorType::get(
6378 llvm::Type::getInt32Ty(getVMContext()), Size / 32);
6379 return ABIArgInfo::getDirect(ResType);
6380 }
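// For example, a 16-bit <2 x i8> is widened to i32, and 64- or 128-bit
// vectors become <2 x i32> or <4 x i32>; remaining sizes (e.g. a 96-bit
// <3 x float> on non-Android targets) take the indirect path below.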
6381 return getNaturalAlignIndirect(Ty, /*ByVal=*/false);
6382 }
6383
6384 ABIArgInfo ARMABIInfo::classifyHomogeneousAggregate(QualType Ty,
6385 const Type *Base,
6386 uint64_t Members) const {
6387 assert(Base && "Base class should be set for homogeneous aggregate");
6388 // Base can be a floating-point or a vector.
6389 if (const VectorType *VT = Base->getAs<VectorType>()) {
6390 // FP16 vectors should be converted to integer vectors
6391 if (!getTarget().hasLegalHalfType() && containsAnyFP16Vectors(Ty)) {
6392 uint64_t Size = getContext().getTypeSize(VT);
6393 auto *NewVecTy = llvm::FixedVectorType::get(
6394 llvm::Type::getInt32Ty(getVMContext()), Size / 32);
6395 llvm::Type *Ty = llvm::ArrayType::get(NewVecTy, Members);
6396 return ABIArgInfo::getDirect(Ty, 0, nullptr, false);
6397 }
6398 }
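// For example, without native half support an HA of two float16x4_t members
// (64-bit half vectors) is rewritten as [2 x <2 x i32>], so the calling
// convention does not depend on FP16 hardware availability.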
6399 return ABIArgInfo::getDirect(nullptr, 0, nullptr, false);
6400 }
6401
6402 ABIArgInfo ARMABIInfo::classifyArgumentType(QualType Ty, bool isVariadic,
6403 unsigned functionCallConv) const {
6404 // 6.1.2.1 The following argument types are VFP CPRCs:
6405 // A single-precision floating-point type (including promoted
6406 // half-precision types); A double-precision floating-point type;
6407 // A 64-bit or 128-bit containerized vector type; Homogeneous Aggregate
6408 // with a Base Type of a single- or double-precision floating-point type,
6409 // 64-bit containerized vectors or 128-bit containerized vectors with one
6410 // to four Elements.
6411 // Variadic functions should always marshal to the base standard.
6412 bool IsAAPCS_VFP =
6413 !isVariadic && isEffectivelyAAPCS_VFP(functionCallConv, /* AAPCS16 */ false);
6414
6415 Ty = useFirstFieldIfTransparentUnion(Ty);
6416
6417 // Handle illegal vector types here.
6418 if (isIllegalVectorType(Ty))
6419 return coerceIllegalVector(Ty);
6420
6421 if (!isAggregateTypeForABI(Ty)) {
6422 // Treat an enum type as its underlying type.
6423 if (const EnumType *EnumTy = Ty->getAs<EnumType>()) {
6424 Ty = EnumTy->getDecl()->getIntegerType();
6425 }
6426
6427 if (const auto *EIT = Ty->getAs<ExtIntType>())
6428 if (EIT->getNumBits() > 64)
6429 return getNaturalAlignIndirect(Ty, /*ByVal=*/true);
6430
6431 return (isPromotableIntegerTypeForABI(Ty) ? ABIArgInfo::getExtend(Ty)
6432 : ABIArgInfo::getDirect());
6433 }
6434
6435 if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, getCXXABI())) {
6436 return getNaturalAlignIndirect(Ty, RAA == CGCXXABI::RAA_DirectInMemory);
6437 }
6438
6439 // Ignore empty records.
6440 if (isEmptyRecord(getContext(), Ty, true))
6441 return ABIArgInfo::getIgnore();
6442
6443 if (IsAAPCS_VFP) {
6444 // Homogeneous Aggregates need to be expanded when we can fit the aggregate
6445 // into VFP registers.
6446 const Type *Base = nullptr;
6447 uint64_t Members = 0;
6448 if (isHomogeneousAggregate(Ty, Base, Members))
6449 return classifyHomogeneousAggregate(Ty, Base, Members);
6450 } else if (getABIKind() == ARMABIInfo::AAPCS16_VFP) {
6451 // WatchOS does have homogeneous aggregates. Note that we intentionally use
6452 // this convention even for a variadic function: the backend will use GPRs
6453 // if needed.
6454 const Type *Base = nullptr;
6455 uint64_t Members = 0;
6456 if (isHomogeneousAggregate(Ty, Base, Members)) {
6457 assert(Base && Members <= 4 && "unexpected homogeneous aggregate");
6458 llvm::Type *Ty =
6459 llvm::ArrayType::get(CGT.ConvertType(QualType(Base, 0)), Members);
6460 return ABIArgInfo::getDirect(Ty, 0, nullptr, false);
6461 }
6462 }
6463
6464 if (getABIKind() == ARMABIInfo::AAPCS16_VFP &&
6465 getContext().getTypeSizeInChars(Ty) > CharUnits::fromQuantity(16)) {
6466 // WatchOS is adopting the 64-bit AAPCS rule on composite types: if they're
6467 // bigger than 128-bits, they get placed in space allocated by the caller,
6468 // and a pointer is passed.
6469 return ABIArgInfo::getIndirect(
6470 CharUnits::fromQuantity(getContext().getTypeAlign(Ty) / 8), false);
6471 }
6472
6473 // Support byval for ARM.
6474 // The ABI alignment for APCS is 4-byte and for AAPCS at least 4-byte and at
6475 // most 8-byte. We realign the indirect argument if type alignment is bigger
6476 // than ABI alignment.
6477 uint64_t ABIAlign = 4;
6478 uint64_t TyAlign;
6479 if (getABIKind() == ARMABIInfo::AAPCS_VFP ||
6480 getABIKind() == ARMABIInfo::AAPCS) {
6481 TyAlign = getContext().getTypeUnadjustedAlignInChars(Ty).getQuantity();
6482 ABIAlign = std::min(std::max(TyAlign, (uint64_t)4), (uint64_t)8);
6483 } else {
6484 TyAlign = getContext().getTypeAlignInChars(Ty).getQuantity();
6485 }
6486 if (getContext().getTypeSizeInChars(Ty) > CharUnits::fromQuantity(64)) {
6487 assert(getABIKind() != ARMABIInfo::AAPCS16_VFP && "unexpected byval");
6488 return ABIArgInfo::getIndirect(CharUnits::fromQuantity(ABIAlign),
6489 /*ByVal=*/true,
6490 /*Realign=*/TyAlign > ABIAlign);
6491 }
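// For example, a 72-byte struct is passed byval here (realigned when its
// natural alignment exceeds the ABI alignment computed above), while a
// 64-byte struct falls through to the integer-array coercion below.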
6492
6493 // On RenderScript, coerce Aggregates <= 64 bytes to an integer array of the
6494 // same size and alignment.
6495 if (getTarget().isRenderScriptTarget()) {
6496 return coerceToIntArray(Ty, getContext(), getVMContext());
6497 }
6498
6499 // Otherwise, pass by coercing to a structure of the appropriate size.
6500 llvm::Type* ElemTy;
6501 unsigned SizeRegs;
6502 // FIXME: Try to match the types of the arguments more accurately where
6503 // we can.
6504 if (TyAlign <= 4) {
6505 ElemTy = llvm::Type::getInt32Ty(getVMContext());
6506 SizeRegs = (getContext().getTypeSize(Ty) + 31) / 32;
6507 } else {
6508 ElemTy = llvm::Type::getInt64Ty(getVMContext());
6509 SizeRegs = (getContext().getTypeSize(Ty) + 63) / 64;
6510 }
6511
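// For example, 'struct S { char buf[6]; };' (48 bits, byte-aligned) is
// coerced to [2 x i32], while a 16-byte struct with 8-byte alignment
// becomes [2 x i64].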
6512 return ABIArgInfo::getDirect(llvm::ArrayType::get(ElemTy, SizeRegs));
6513 }
6514
6515 static bool isIntegerLikeType(QualType Ty, ASTContext &Context,
6516 llvm::LLVMContext &VMContext) {
6517 // APCS, C Language Calling Conventions, Non-Simple Return Values: A structure
6518 // is called integer-like if its size is less than or equal to one word, and
6519 // the offset of each of its addressable sub-fields is zero.
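//
// For example, 'union U { int i; char c; };' is integer-like (every field is
// at offset zero), whereas 'struct P { short a, b; };' is not, because 'b'
// sits at a non-zero offset.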
6520
6521 uint64_t Size = Context.getTypeSize(Ty);
6522
6523 // Check that the type fits in a word.
6524 if (Size > 32)
6525 return false;
6526
6527 // FIXME: Handle vector types!
6528 if (Ty->isVectorType())
6529 return false;
6530
6531 // Float types are never treated as "integer like".
6532 if (Ty->isRealFloatingType())
6533 return false;
6534
6535 // If this is a builtin or pointer type then it is ok.
6536 if (Ty->getAs<BuiltinType>() || Ty->isPointerType())
6537 return true;
6538
6539 // Small complex integer types are "integer like".
6540 if (const ComplexType *CT = Ty->getAs<ComplexType>())
6541 return isIntegerLikeType(CT->getElementType(), Context, VMContext);
6542
6543 // Single element and zero sized arrays should be allowed, by the definition
6544 // above, but they are not.
6545
6546 // Otherwise, it must be a record type.
6547 const RecordType *RT = Ty->getAs<RecordType>();
6548 if (!RT) return false;
6549
6550 // Ignore records with flexible arrays.
6551 const RecordDecl *RD = RT->getDecl();
6552 if (RD->hasFlexibleArrayMember())
6553 return false;
6554
6555 // Check that all sub-fields are at offset 0, and are themselves "integer
6556 // like".
6557 const ASTRecordLayout &Layout = Context.getASTRecordLayout(RD);
6558
6559 bool HadField = false;
6560 unsigned idx = 0;
6561 for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
6562 i != e; ++i, ++idx) {
6563 const FieldDecl *FD = *i;
6564
6565 // Bit-fields are not addressable, we only need to verify they are "integer
6566 // like". We still have to disallow a subsequent non-bitfield, for example:
6567 //   struct { int : 0; int x; }
6568 // is non-integer like according to gcc.
6569 if (FD->isBitField()) {
6570 if (!RD->isUnion())
6571 HadField = true;
6572
6573 if (!isIntegerLikeType(FD->getType(), Context, VMContext))
6574 return false;
6575
6576 continue;
6577 }
6578
6579 // Check if this field is at offset 0.
6580 if (Layout.getFieldOffset(idx) != 0)
6581 return false;
6582
6583 if (!isIntegerLikeType(FD->getType(), Context, VMContext))
6584 return false;
6585
6586 // Only allow at most one field in a structure. This doesn't match the
6587 // wording above, but follows gcc in situations with a field following an
6588 // empty structure.
6589 if (!RD->isUnion()) {
6590 if (HadField)
6591 return false;
6592
6593 HadField = true;
6594 }
6595 }
6596
6597 return true;
6598 }
6599
6600 ABIArgInfo ARMABIInfo::classifyReturnType(QualType RetTy, bool isVariadic,
6601 unsigned functionCallConv) const {
6602
6603 // Variadic functions should always marshal to the base standard.
6604 bool IsAAPCS_VFP =
6605 !isVariadic && isEffectivelyAAPCS_VFP(functionCallConv, /* AAPCS16 */ true);
6606
6607 if (RetTy->isVoidType())
6608 return ABIArgInfo::getIgnore();
6609
6610 if (const VectorType *VT = RetTy->getAs<VectorType>()) {
6611 // Large vector types should be returned via memory.
6612 if (getContext().getTypeSize(RetTy) > 128)
6613 return getNaturalAlignIndirect(RetTy);
6614 // TODO: FP16/BF16 vectors should be converted to integer vectors
6615 // This check is similar to isIllegalVectorType - refactor?
6616 if ((!getTarget().hasLegalHalfType() &&
6617 (VT->getElementType()->isFloat16Type() ||
6618 VT->getElementType()->isHalfType())) ||
6619 (IsFloatABISoftFP &&
6620 VT->getElementType()->isBFloat16Type()))
6621 return coerceIllegalVector(RetTy);
6622 }
6623
6624 if (!isAggregateTypeForABI(RetTy)) {
6625 // Treat an enum type as its underlying type.
6626 if (const EnumType *EnumTy = RetTy->getAs<EnumType>())
6627 RetTy = EnumTy->getDecl()->getIntegerType();
6628
6629 if (const auto *EIT = RetTy->getAs<ExtIntType>())
6630 if (EIT->getNumBits() > 64)
6631 return getNaturalAlignIndirect(RetTy, /*ByVal=*/false);
6632
6633 return isPromotableIntegerTypeForABI(RetTy) ? ABIArgInfo::getExtend(RetTy)
6634 : ABIArgInfo::getDirect();
6635 }
6636
6637 // Are we following APCS?
6638 if (getABIKind() == APCS) {
6639 if (isEmptyRecord(getContext(), RetTy, false))
6640 return ABIArgInfo::getIgnore();
6641
6642 // Complex types are all returned as packed integers.
6643 //
6644 // FIXME: Consider using 2 x vector types if the back end handles them
6645 // correctly.
6646 if (RetTy->isAnyComplexType())
6647 return ABIArgInfo::getDirect(llvm::IntegerType::get(
6648 getVMContext(), getContext().getTypeSize(RetTy)));
6649
6650 // Integer like structures are returned in r0.
6651 if (isIntegerLikeType(RetTy, getContext(), getVMContext())) {
6652 // Return in the smallest viable integer type.
6653 uint64_t Size = getContext().getTypeSize(RetTy);
6654 if (Size <= 8)
6655 return ABIArgInfo::getDirect(llvm::Type::getInt8Ty(getVMContext()));
6656 if (Size <= 16)
6657 return ABIArgInfo::getDirect(llvm::Type::getInt16Ty(getVMContext()));
6658 return ABIArgInfo::getDirect(llvm::Type::getInt32Ty(getVMContext()));
6659 }
6660
6661 // Otherwise return in memory.
6662 return getNaturalAlignIndirect(RetTy);
6663 }
6664
6665 // Otherwise this is an AAPCS variant.
6666
6667 if (isEmptyRecord(getContext(), RetTy, true))
6668 return ABIArgInfo::getIgnore();
6669
6670 // Check for homogeneous aggregates with AAPCS-VFP.
6671 if (IsAAPCS_VFP) {
6672 const Type *Base = nullptr;
6673 uint64_t Members = 0;
6674 if (isHomogeneousAggregate(RetTy, Base, Members))
6675 return classifyHomogeneousAggregate(RetTy, Base, Members);
6676 }
6677
6678 // Aggregates <= 4 bytes are returned in r0; other aggregates
6679 // are returned indirectly.
6680 uint64_t Size = getContext().getTypeSize(RetTy);
6681 if (Size <= 32) {
6682 // On RenderScript, coerce Aggregates <= 4 bytes to an integer array of the
6683 // same size and alignment.
6684 if (getTarget().isRenderScriptTarget()) {
6685 return coerceToIntArray(RetTy, getContext(), getVMContext());
6686 }
6687 if (getDataLayout().isBigEndian())
6688 // Return in a 32-bit integer type (as if loaded by LDR, AAPCS 5.4).
6689 return ABIArgInfo::getDirect(llvm::Type::getInt32Ty(getVMContext()));
6690
6691 // Return in the smallest viable integer type.
6692 if (Size <= 8)
6693 return ABIArgInfo::getDirect(llvm::Type::getInt8Ty(getVMContext()));
6694 if (Size <= 16)
6695 return ABIArgInfo::getDirect(llvm::Type::getInt16Ty(getVMContext()));
6696 return ABIArgInfo::getDirect(llvm::Type::getInt32Ty(getVMContext()));
6697 } else if (Size <= 128 && getABIKind() == AAPCS16_VFP) {
6698 llvm::Type *Int32Ty = llvm::Type::getInt32Ty(getVMContext());
6699 llvm::Type *CoerceTy =
6700 llvm::ArrayType::get(Int32Ty, llvm::alignTo(Size, 32) / 32);
6701 return ABIArgInfo::getDirect(CoerceTy);
6702 }
6703
6704 return getNaturalAlignIndirect(RetTy);
6705 }
6706
6707 /// isIllegalVectorType - check whether Ty is an illegal vector type.
6708 bool ARMABIInfo::isIllegalVectorType(QualType Ty) const {
6709 if (const VectorType *VT = Ty->getAs<VectorType> ()) {
6710 // On targets that don't support half, fp16 or bfloat, they are expanded
6711 // into float, and we don't want the ABI to depend on whether or not they
6712 // are supported in hardware. Thus return false to coerce vectors of these
6713 // types into integer vectors.
6714 // We do not depend on hasLegalHalfType for bfloat as it is a
6715 // separate IR type.
6716 if ((!getTarget().hasLegalHalfType() &&
6717 (VT->getElementType()->isFloat16Type() ||
6718 VT->getElementType()->isHalfType())) ||
6719 (IsFloatABISoftFP &&
6720 VT->getElementType()->isBFloat16Type()))
6721 return true;
6722 if (isAndroid()) {
6723 // Android shipped using Clang 3.1, which supported a slightly different
6724 // vector ABI. The primary differences were that 3-element vector types
6725 // were legal, and so were sub 32-bit vectors (i.e. <2 x i8>). This path
6726 // accepts that legacy behavior for Android only.
6727 // Check whether VT is legal.
6728 unsigned NumElements = VT->getNumElements();
6729 // NumElements should be a power of 2 or equal to 3.
6730 if (!llvm::isPowerOf2_32(NumElements) && NumElements != 3)
6731 return true;
6732 } else {
6733 // Check whether VT is legal.
6734 unsigned NumElements = VT->getNumElements();
6735 uint64_t Size = getContext().getTypeSize(VT);
6736 // NumElements should be a power of 2.
6737 if (!llvm::isPowerOf2_32(NumElements))
6738 return true;
6739 // Vectors of 32 bits or fewer are illegal.
6740 return Size <= 32;
6741 }
6742 }
6743 return false;
6744 }
6745
6746 /// Return true if a type contains any 16-bit floating point vectors
6747 bool ARMABIInfo::containsAnyFP16Vectors(QualType Ty) const {
6748 if (const ConstantArrayType *AT = getContext().getAsConstantArrayType(Ty)) {
6749 uint64_t NElements = AT->getSize().getZExtValue();
6750 if (NElements == 0)
6751 return false;
6752 return containsAnyFP16Vectors(AT->getElementType());
6753 } else if (const RecordType *RT = Ty->getAs<RecordType>()) {
6754 const RecordDecl *RD = RT->getDecl();
6755
6756 // If this is a C++ record, check the bases first.
6757 if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD))
6758 if (llvm::any_of(CXXRD->bases(), [this](const CXXBaseSpecifier &B) {
6759 return containsAnyFP16Vectors(B.getType());
6760 }))
6761 return true;
6762
6763 if (llvm::any_of(RD->fields(), [this](FieldDecl *FD) {
6764 return FD && containsAnyFP16Vectors(FD->getType());
6765 }))
6766 return true;
6767
6768 return false;
6769 } else {
6770 if (const VectorType *VT = Ty->getAs<VectorType>())
6771 return (VT->getElementType()->isFloat16Type() ||
6772 VT->getElementType()->isBFloat16Type() ||
6773 VT->getElementType()->isHalfType());
6774 return false;
6775 }
6776 }
6777
6778 bool ARMABIInfo::isLegalVectorTypeForSwift(CharUnits vectorSize,
6779 llvm::Type *eltTy,
6780 unsigned numElts) const {
6781 if (!llvm::isPowerOf2_32(numElts))
6782 return false;
6783 unsigned size = getDataLayout().getTypeStoreSizeInBits(eltTy);
6784 if (size > 64)
6785 return false;
6786 if (vectorSize.getQuantity() != 8 &&
6787 (vectorSize.getQuantity() != 16 || numElts == 1))
6788 return false;
6789 return true;
6790 }
6791
6792 bool ARMABIInfo::isHomogeneousAggregateBaseType(QualType Ty) const {
6793 // Homogeneous aggregates for AAPCS-VFP must have base types of float,
6794 // double, or 64-bit or 128-bit vectors.
6795 if (const BuiltinType *BT = Ty->getAs<BuiltinType>()) {
6796 if (BT->getKind() == BuiltinType::Float ||
6797 BT->getKind() == BuiltinType::Double ||
6798 BT->getKind() == BuiltinType::LongDouble)
6799 return true;
6800 } else if (const VectorType *VT = Ty->getAs<VectorType>()) {
6801 unsigned VecSize = getContext().getTypeSize(VT);
6802 if (VecSize == 64 || VecSize == 128)
6803 return true;
6804 }
6805 return false;
6806 }
6807
6808 bool ARMABIInfo::isHomogeneousAggregateSmallEnough(const Type *Base,
6809 uint64_t Members) const {
6810 return Members <= 4;
6811 }
6812
6813 bool ARMABIInfo::isEffectivelyAAPCS_VFP(unsigned callConvention,
6814 bool acceptHalf) const {
6815 // Give precedence to user-specified calling conventions.
6816 if (callConvention != llvm::CallingConv::C)
6817 return (callConvention == llvm::CallingConv::ARM_AAPCS_VFP);
6818 else
6819 return (getABIKind() == AAPCS_VFP) ||
6820 (acceptHalf && (getABIKind() == AAPCS16_VFP));
6821 }
6822
6823 Address ARMABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
6824 QualType Ty) const {
6825 CharUnits SlotSize = CharUnits::fromQuantity(4);
6826
6827 // Empty records are ignored for parameter passing purposes.
6828 if (isEmptyRecord(getContext(), Ty, true)) {
6829 Address Addr(CGF.Builder.CreateLoad(VAListAddr), SlotSize);
6830 Addr = CGF.Builder.CreateElementBitCast(Addr, CGF.ConvertTypeForMem(Ty));
6831 return Addr;
6832 }
6833
6834 CharUnits TySize = getContext().getTypeSizeInChars(Ty);
6835 CharUnits TyAlignForABI = getContext().getTypeUnadjustedAlignInChars(Ty);
6836
6837 // Use indirect if the size of the illegal vector is bigger than 16 bytes.
6838 bool IsIndirect = false;
6839 const Type *Base = nullptr;
6840 uint64_t Members = 0;
6841 if (TySize > CharUnits::fromQuantity(16) && isIllegalVectorType(Ty)) {
6842 IsIndirect = true;
6843
6844 // ARMv7k passes structs bigger than 16 bytes indirectly, in space
6845 // allocated by the caller.
6846 } else if (TySize > CharUnits::fromQuantity(16) &&
6847 getABIKind() == ARMABIInfo::AAPCS16_VFP &&
6848 !isHomogeneousAggregate(Ty, Base, Members)) {
6849 IsIndirect = true;
6850
6851 // Otherwise, bound the type's ABI alignment.
6852 // The ABI alignment for 64-bit or 128-bit vectors is 8 for AAPCS and 4 for
6853 // APCS. For AAPCS, the ABI alignment is at least 4-byte and at most 8-byte.
6854 // Our callers should be prepared to handle an under-aligned address.
6855 } else if (getABIKind() == ARMABIInfo::AAPCS_VFP ||
6856 getABIKind() == ARMABIInfo::AAPCS) {
6857 TyAlignForABI = std::max(TyAlignForABI, CharUnits::fromQuantity(4));
6858 TyAlignForABI = std::min(TyAlignForABI, CharUnits::fromQuantity(8));
6859 } else if (getABIKind() == ARMABIInfo::AAPCS16_VFP) {
6860 // ARMv7k allows type alignment up to 16 bytes.
6861 TyAlignForABI = std::max(TyAlignForABI, CharUnits::fromQuantity(4));
6862 TyAlignForABI = std::min(TyAlignForABI, CharUnits::fromQuantity(16));
6863 } else {
6864 TyAlignForABI = CharUnits::fromQuantity(4);
6865 }
6866
6867 TypeInfoChars TyInfo(TySize, TyAlignForABI, false);
6868 return emitVoidPtrVAArg(CGF, VAListAddr, Ty, IsIndirect, TyInfo,
6869 SlotSize, /*AllowHigherAlign*/ true);
6870 }
6871
6872 //===----------------------------------------------------------------------===//
6873 // NVPTX ABI Implementation
6874 //===----------------------------------------------------------------------===//
6875
6876 namespace {
6877
6878 class NVPTXTargetCodeGenInfo;
6879
6880 class NVPTXABIInfo : public ABIInfo {
6881 NVPTXTargetCodeGenInfo &CGInfo;
6882
6883 public:
6884 NVPTXABIInfo(CodeGenTypes &CGT, NVPTXTargetCodeGenInfo &Info)
6885 : ABIInfo(CGT), CGInfo(Info) {}
6886
6887 ABIArgInfo classifyReturnType(QualType RetTy) const;
6888 ABIArgInfo classifyArgumentType(QualType Ty) const;
6889
6890 void computeInfo(CGFunctionInfo &FI) const override;
6891 Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
6892 QualType Ty) const override;
6893 bool isUnsupportedType(QualType T) const;
6894 ABIArgInfo coerceToIntArrayWithLimit(QualType Ty, unsigned MaxSize) const;
6895 };
6896
6897 class NVPTXTargetCodeGenInfo : public TargetCodeGenInfo {
6898 public:
6899 NVPTXTargetCodeGenInfo(CodeGenTypes &CGT)
6900 : TargetCodeGenInfo(std::make_unique<NVPTXABIInfo>(CGT, *this)) {}
6901
6902 void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
6903 CodeGen::CodeGenModule &M) const override;
6904 bool shouldEmitStaticExternCAliases() const override;
6905
6906 llvm::Type *getCUDADeviceBuiltinSurfaceDeviceType() const override {
6907 // On the device side, surface reference is represented as an object handle
6908 // in 64-bit integer.
6909 return llvm::Type::getInt64Ty(getABIInfo().getVMContext());
6910 }
6911
6912 llvm::Type *getCUDADeviceBuiltinTextureDeviceType() const override {
6913 // On the device side, texture reference is represented as an object handle
6914 // in 64-bit integer.
6915 return llvm::Type::getInt64Ty(getABIInfo().getVMContext());
6916 }
6917
6918 bool emitCUDADeviceBuiltinSurfaceDeviceCopy(CodeGenFunction &CGF, LValue Dst,
6919 LValue Src) const override {
6920 emitBuiltinSurfTexDeviceCopy(CGF, Dst, Src);
6921 return true;
6922 }
6923
6924 bool emitCUDADeviceBuiltinTextureDeviceCopy(CodeGenFunction &CGF, LValue Dst,
6925 LValue Src) const override {
6926 emitBuiltinSurfTexDeviceCopy(CGF, Dst, Src);
6927 return true;
6928 }
6929
6930 private:
6931 // Creates an MDNode with GV, Name, and Operand as operands, and adds the
6932 // resulting MDNode to the nvvm.annotations named metadata node.
6933 static void addNVVMMetadata(llvm::GlobalValue *GV, StringRef Name,
6934 int Operand);
6935
6936 static void emitBuiltinSurfTexDeviceCopy(CodeGenFunction &CGF, LValue Dst,
6937 LValue Src) {
6938 llvm::Value *Handle = nullptr;
6939 llvm::Constant *C =
6940 llvm::dyn_cast<llvm::Constant>(Src.getAddress(CGF).getPointer());
6941 // Lookup `addrspacecast` through the constant pointer if any.
6942 if (auto *ASC = llvm::dyn_cast_or_null<llvm::AddrSpaceCastOperator>(C))
6943 C = llvm::cast<llvm::Constant>(ASC->getPointerOperand());
6944 if (auto *GV = llvm::dyn_cast_or_null<llvm::GlobalVariable>(C)) {
6945 // Load the handle from the specific global variable using
6946 // `nvvm.texsurf.handle.internal` intrinsic.
6947 Handle = CGF.EmitRuntimeCall(
6948 CGF.CGM.getIntrinsic(llvm::Intrinsic::nvvm_texsurf_handle_internal,
6949 {GV->getType()}),
6950 {GV}, "texsurf_handle");
6951 } else
6952 Handle = CGF.EmitLoadOfScalar(Src, SourceLocation());
6953 CGF.EmitStoreOfScalar(Handle, Dst);
6954 }
6955 };
6956
6957 /// Checks whether the type is not supported directly by the current target.
6958 bool NVPTXABIInfo::isUnsupportedType(QualType T) const {
6959 ASTContext &Context = getContext();
6960 if (!Context.getTargetInfo().hasFloat16Type() && T->isFloat16Type())
6961 return true;
6962 if (!Context.getTargetInfo().hasFloat128Type() &&
6963 (T->isFloat128Type() ||
6964 (T->isRealFloatingType() && Context.getTypeSize(T) == 128)))
6965 return true;
6966 if (const auto *EIT = T->getAs<ExtIntType>())
6967 return EIT->getNumBits() >
6968 (Context.getTargetInfo().hasInt128Type() ? 128U : 64U);
6969 if (!Context.getTargetInfo().hasInt128Type() && T->isIntegerType() &&
6970 Context.getTypeSize(T) > 64U)
6971 return true;
6972 if (const auto *AT = T->getAsArrayTypeUnsafe())
6973 return isUnsupportedType(AT->getElementType());
6974 const auto *RT = T->getAs<RecordType>();
6975 if (!RT)
6976 return false;
6977 const RecordDecl *RD = RT->getDecl();
6978
6979 // If this is a C++ record, check the bases first.
6980 if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD))
6981 for (const CXXBaseSpecifier &I : CXXRD->bases())
6982 if (isUnsupportedType(I.getType()))
6983 return true;
6984
6985 for (const FieldDecl *I : RD->fields())
6986 if (isUnsupportedType(I->getType()))
6987 return true;
6988 return false;
6989 }
6990
6991 /// Coerce the given type into an integer array with elements at most MaxSize bits wide.
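/// For example, a 128-bit value with 64-bit alignment and MaxSize = 64 is
/// lowered as [2 x i64]; with MaxSize = 32 it would instead become
/// [4 x i32].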
6992 ABIArgInfo NVPTXABIInfo::coerceToIntArrayWithLimit(QualType Ty,
6993 unsigned MaxSize) const {
6994 // Alignment and Size are measured in bits.
6995 const uint64_t Size = getContext().getTypeSize(Ty);
6996 const uint64_t Alignment = getContext().getTypeAlign(Ty);
6997 const unsigned Div = std::min<unsigned>(MaxSize, Alignment);
6998 llvm::Type *IntType = llvm::Type::getIntNTy(getVMContext(), Div);
6999 const uint64_t NumElements = (Size + Div - 1) / Div;
7000 return ABIArgInfo::getDirect(llvm::ArrayType::get(IntType, NumElements));
7001 }
7002
7003 ABIArgInfo NVPTXABIInfo::classifyReturnType(QualType RetTy) const {
7004 if (RetTy->isVoidType())
7005 return ABIArgInfo::getIgnore();
7006
7007 if (getContext().getLangOpts().OpenMP &&
7008 getContext().getLangOpts().OpenMPIsDevice && isUnsupportedType(RetTy))
7009 return coerceToIntArrayWithLimit(RetTy, 64);
7010
7011 // Note: this is different from the default ABI.
7012 if (!RetTy->isScalarType())
7013 return ABIArgInfo::getDirect();
7014
7015 // Treat an enum type as its underlying type.
7016 if (const EnumType *EnumTy = RetTy->getAs<EnumType>())
7017 RetTy = EnumTy->getDecl()->getIntegerType();
7018
7019 return (isPromotableIntegerTypeForABI(RetTy) ? ABIArgInfo::getExtend(RetTy)
7020 : ABIArgInfo::getDirect());
7021 }
7022
7023 ABIArgInfo NVPTXABIInfo::classifyArgumentType(QualType Ty) const {
7024 // Treat an enum type as its underlying type.
7025 if (const EnumType *EnumTy = Ty->getAs<EnumType>())
7026 Ty = EnumTy->getDecl()->getIntegerType();
7027
7028 // Return aggregate types as indirect by value.
7029 if (isAggregateTypeForABI(Ty)) {
7030 // Under CUDA device compilation, tex/surf builtin types are replaced with
7031 // object types and passed directly.
7032 if (getContext().getLangOpts().CUDAIsDevice) {
7033 if (Ty->isCUDADeviceBuiltinSurfaceType())
7034 return ABIArgInfo::getDirect(
7035 CGInfo.getCUDADeviceBuiltinSurfaceDeviceType());
7036 if (Ty->isCUDADeviceBuiltinTextureType())
7037 return ABIArgInfo::getDirect(
7038 CGInfo.getCUDADeviceBuiltinTextureDeviceType());
7039 }
7040 return getNaturalAlignIndirect(Ty, /* byval */ true);
7041 }
7042
7043 if (const auto *EIT = Ty->getAs<ExtIntType>()) {
7044 if ((EIT->getNumBits() > 128) ||
7045 (!getContext().getTargetInfo().hasInt128Type() &&
7046 EIT->getNumBits() > 64))
7047 return getNaturalAlignIndirect(Ty, /* byval */ true);
7048 }
7049
7050 return (isPromotableIntegerTypeForABI(Ty) ? ABIArgInfo::getExtend(Ty)
7051 : ABIArgInfo::getDirect());
7052 }
7053
7054 void NVPTXABIInfo::computeInfo(CGFunctionInfo &FI) const {
7055 if (!getCXXABI().classifyReturnType(FI))
7056 FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
7057 for (auto &I : FI.arguments())
7058 I.info = classifyArgumentType(I.type);
7059
7060 // Always honor user-specified calling convention.
7061 if (FI.getCallingConvention() != llvm::CallingConv::C)
7062 return;
7063
7064 FI.setEffectiveCallingConvention(getRuntimeCC());
7065 }
7066
7067 Address NVPTXABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
7068 QualType Ty) const {
7069 llvm_unreachable("NVPTX does not support varargs");
7070 }
7071
7072 void NVPTXTargetCodeGenInfo::setTargetAttributes(
7073 const Decl *D, llvm::GlobalValue *GV, CodeGen::CodeGenModule &M) const {
7074 if (GV->isDeclaration())
7075 return;
7076 const VarDecl *VD = dyn_cast_or_null<VarDecl>(D);
7077 if (VD) {
7078 if (M.getLangOpts().CUDA) {
7079 if (VD->getType()->isCUDADeviceBuiltinSurfaceType())
7080 addNVVMMetadata(GV, "surface", 1);
7081 else if (VD->getType()->isCUDADeviceBuiltinTextureType())
7082 addNVVMMetadata(GV, "texture", 1);
7083 return;
7084 }
7085 }
7086
7087 const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D);
7088 if (!FD) return;
7089
7090 llvm::Function *F = cast<llvm::Function>(GV);
7091
7092 // Perform special handling in OpenCL mode
7093 if (M.getLangOpts().OpenCL) {
7094 // Use OpenCL function attributes to check for kernel functions
7095 // By default, all functions are device functions
7096 if (FD->hasAttr<OpenCLKernelAttr>()) {
7097 // OpenCL __kernel functions get kernel metadata
7098 // Create !{<func-ref>, metadata !"kernel", i32 1} node
7099 addNVVMMetadata(F, "kernel", 1);
7100 // And kernel functions are not subject to inlining
7101 F->addFnAttr(llvm::Attribute::NoInline);
7102 }
7103 }
7104
7105 // Perform special handling in CUDA mode.
7106 if (M.getLangOpts().CUDA) {
7107 // CUDA __global__ functions get a kernel metadata entry. Since
7108 // __global__ functions cannot be called from the device, we do not
7109 // need to set the noinline attribute.
7110 if (FD->hasAttr<CUDAGlobalAttr>()) {
7111 // Create !{<func-ref>, metadata !"kernel", i32 1} node
7112 addNVVMMetadata(F, "kernel", 1);
7113 }
7114 if (CUDALaunchBoundsAttr *Attr = FD->getAttr<CUDALaunchBoundsAttr>()) {
7115 // Create !{<func-ref>, metadata !"maxntidx", i32 <val>} node
7116 llvm::APSInt MaxThreads(32);
7117 MaxThreads = Attr->getMaxThreads()->EvaluateKnownConstInt(M.getContext());
7118 if (MaxThreads > 0)
7119 addNVVMMetadata(F, "maxntidx", MaxThreads.getExtValue());
7120
7121 // min blocks is an optional argument for CUDALaunchBoundsAttr. If it was
7122 // not specified in __launch_bounds__ or if the user specified a 0 value,
7123 // we don't have to add a PTX directive.
7124 if (Attr->getMinBlocks()) {
7125 llvm::APSInt MinBlocks(32);
7126 MinBlocks = Attr->getMinBlocks()->EvaluateKnownConstInt(M.getContext());
7127 if (MinBlocks > 0)
7128 // Create !{<func-ref>, metadata !"minctasm", i32 <val>} node
7129 addNVVMMetadata(F, "minctasm", MinBlocks.getExtValue());
7130 }
7131 }
7132 }
7133 }
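// Illustrative example for the handling above (editor's sketch; the CUDA
// source below is assumed, not taken from this file): given
//
//   __global__ void __launch_bounds__(256, 2) kern(...) { ... }
//
// the code appends roughly the following operands to the !nvvm.annotations
// named metadata:
//
//   !{void (...)* @kern, !"kernel",   i32 1}
//   !{void (...)* @kern, !"maxntidx", i32 256}
//   !{void (...)* @kern, !"minctasm", i32 2}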
7134
7135 void NVPTXTargetCodeGenInfo::addNVVMMetadata(llvm::GlobalValue *GV,
7136 StringRef Name, int Operand) {
7137 llvm::Module *M = GV->getParent();
7138 llvm::LLVMContext &Ctx = M->getContext();
7139
7140 // Get "nvvm.annotations" metadata node
7141 llvm::NamedMDNode *MD = M->getOrInsertNamedMetadata("nvvm.annotations");
7142
7143 llvm::Metadata *MDVals[] = {
7144 llvm::ConstantAsMetadata::get(GV), llvm::MDString::get(Ctx, Name),
7145 llvm::ConstantAsMetadata::get(
7146 llvm::ConstantInt::get(llvm::Type::getInt32Ty(Ctx), Operand))};
7147 // Append metadata to nvvm.annotations
7148 MD->addOperand(llvm::MDNode::get(Ctx, MDVals));
7149 }
7150
7151 bool NVPTXTargetCodeGenInfo::shouldEmitStaticExternCAliases() const {
7152 return false;
7153 }
7154 }
7155
7156 //===----------------------------------------------------------------------===//
7157 // SystemZ ABI Implementation
7158 //===----------------------------------------------------------------------===//
7159
7160 namespace {
7161
7162 class SystemZABIInfo : public SwiftABIInfo {
7163 bool HasVector;
7164 bool IsSoftFloatABI;
7165
7166 public:
7167   SystemZABIInfo(CodeGenTypes &CGT, bool HV, bool SF)
7168 : SwiftABIInfo(CGT), HasVector(HV), IsSoftFloatABI(SF) {}
7169
7170 bool isPromotableIntegerTypeForABI(QualType Ty) const;
7171 bool isCompoundType(QualType Ty) const;
7172 bool isVectorArgumentType(QualType Ty) const;
7173 bool isFPArgumentType(QualType Ty) const;
7174 QualType GetSingleElementType(QualType Ty) const;
7175
7176 ABIArgInfo classifyReturnType(QualType RetTy) const;
7177 ABIArgInfo classifyArgumentType(QualType ArgTy) const;
7178
7179   void computeInfo(CGFunctionInfo &FI) const override {
7180 if (!getCXXABI().classifyReturnType(FI))
7181 FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
7182 for (auto &I : FI.arguments())
7183 I.info = classifyArgumentType(I.type);
7184 }
7185
7186 Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
7187 QualType Ty) const override;
7188
7189   bool shouldPassIndirectlyForSwift(ArrayRef<llvm::Type*> scalars,
7190 bool asReturnValue) const override {
7191 return occupiesMoreThan(CGT, scalars, /*total*/ 4);
7192 }
7193   bool isSwiftErrorInRegister() const override {
7194 return false;
7195 }
7196 };
7197
7198 class SystemZTargetCodeGenInfo : public TargetCodeGenInfo {
7199 public:
7200   SystemZTargetCodeGenInfo(CodeGenTypes &CGT, bool HasVector, bool SoftFloatABI)
7201 : TargetCodeGenInfo(
7202 std::make_unique<SystemZABIInfo>(CGT, HasVector, SoftFloatABI)) {}
7203 };
7204
7205 }
7206
7207 bool SystemZABIInfo::isPromotableIntegerTypeForABI(QualType Ty) const {
7208 // Treat an enum type as its underlying type.
7209 if (const EnumType *EnumTy = Ty->getAs<EnumType>())
7210 Ty = EnumTy->getDecl()->getIntegerType();
7211
7212 // Promotable integer types are required to be promoted by the ABI.
7213 if (ABIInfo::isPromotableIntegerTypeForABI(Ty))
7214 return true;
7215
7216 if (const auto *EIT = Ty->getAs<ExtIntType>())
7217 if (EIT->getNumBits() < 64)
7218 return true;
7219
7220 // 32-bit values must also be promoted.
7221 if (const BuiltinType *BT = Ty->getAs<BuiltinType>())
7222 switch (BT->getKind()) {
7223 case BuiltinType::Int:
7224 case BuiltinType::UInt:
7225 return true;
7226 default:
7227 return false;
7228 }
7229 return false;
7230 }
7231
7232 bool SystemZABIInfo::isCompoundType(QualType Ty) const {
7233 return (Ty->isAnyComplexType() ||
7234 Ty->isVectorType() ||
7235 isAggregateTypeForABI(Ty));
7236 }
7237
7238 bool SystemZABIInfo::isVectorArgumentType(QualType Ty) const {
7239 return (HasVector &&
7240 Ty->isVectorType() &&
7241 getContext().getTypeSize(Ty) <= 128);
7242 }
7243
7244 bool SystemZABIInfo::isFPArgumentType(QualType Ty) const {
7245 if (IsSoftFloatABI)
7246 return false;
7247
7248 if (const BuiltinType *BT = Ty->getAs<BuiltinType>())
7249 switch (BT->getKind()) {
7250 case BuiltinType::Float:
7251 case BuiltinType::Double:
7252 return true;
7253 default:
7254 return false;
7255 }
7256
7257 return false;
7258 }
7259
7260 QualType SystemZABIInfo::GetSingleElementType(QualType Ty) const {
7261 const RecordType *RT = Ty->getAs<RecordType>();
7262
7263 if (RT && RT->isStructureOrClassType()) {
7264 const RecordDecl *RD = RT->getDecl();
7265 QualType Found;
7266
7267 // If this is a C++ record, check the bases first.
7268 if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD))
7269 for (const auto &I : CXXRD->bases()) {
7270 QualType Base = I.getType();
7271
7272 // Empty bases don't affect things either way.
7273 if (isEmptyRecord(getContext(), Base, true))
7274 continue;
7275
7276 if (!Found.isNull())
7277 return Ty;
7278 Found = GetSingleElementType(Base);
7279 }
7280
7281 // Check the fields.
7282 for (const auto *FD : RD->fields()) {
7283 // For compatibility with GCC, ignore empty bitfields in C++ mode.
7284 // Unlike isSingleElementStruct(), empty structure and array fields
7285 // do count. So do anonymous bitfields that aren't zero-sized.
7286 if (getContext().getLangOpts().CPlusPlus &&
7287 FD->isZeroLengthBitField(getContext()))
7288 continue;
7289 // Like isSingleElementStruct(), ignore C++20 empty data members.
7290 if (FD->hasAttr<NoUniqueAddressAttr>() &&
7291 isEmptyRecord(getContext(), FD->getType(), true))
7292 continue;
7293
7294 // Unlike isSingleElementStruct(), arrays do not count.
7295 // Nested structures still do though.
7296 if (!Found.isNull())
7297 return Ty;
7298 Found = GetSingleElementType(FD->getType());
7299 }
7300
7301 // Unlike isSingleElementStruct(), trailing padding is allowed.
7302 // An 8-byte aligned struct s { float f; } is passed as a double.
7303 if (!Found.isNull())
7304 return Found;
7305 }
7306
7307 return Ty;
7308 }
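// Example for GetSingleElementType above (editor's sketch): for
//
//   struct Empty {};
//   struct S : Empty { double d; };
//
// the empty base is skipped and the single field is found, so the function
// returns the type 'double'; a second non-empty base or field would make it
// return S itself.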
7309
7310 Address SystemZABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
7311 QualType Ty) const {
7312 // Assume that va_list type is correct; should be pointer to LLVM type:
7313 // struct {
7314 // i64 __gpr;
7315 // i64 __fpr;
7316 // i8 *__overflow_arg_area;
7317 // i8 *__reg_save_area;
7318 // };
7319
7320 // Every non-vector argument occupies 8 bytes and is passed by preference
7321 // in either GPRs or FPRs. Vector arguments occupy 8 or 16 bytes and are
7322 // always passed on the stack.
7323 Ty = getContext().getCanonicalType(Ty);
7324 auto TyInfo = getContext().getTypeInfoInChars(Ty);
7325 llvm::Type *ArgTy = CGF.ConvertTypeForMem(Ty);
7326 llvm::Type *DirectTy = ArgTy;
7327 ABIArgInfo AI = classifyArgumentType(Ty);
7328 bool IsIndirect = AI.isIndirect();
7329 bool InFPRs = false;
7330 bool IsVector = false;
7331 CharUnits UnpaddedSize;
7332 CharUnits DirectAlign;
7333 if (IsIndirect) {
7334 DirectTy = llvm::PointerType::getUnqual(DirectTy);
7335 UnpaddedSize = DirectAlign = CharUnits::fromQuantity(8);
7336 } else {
7337 if (AI.getCoerceToType())
7338 ArgTy = AI.getCoerceToType();
7339 InFPRs = (!IsSoftFloatABI && (ArgTy->isFloatTy() || ArgTy->isDoubleTy()));
7340 IsVector = ArgTy->isVectorTy();
7341 UnpaddedSize = TyInfo.Width;
7342 DirectAlign = TyInfo.Align;
7343 }
7344 CharUnits PaddedSize = CharUnits::fromQuantity(8);
7345 if (IsVector && UnpaddedSize > PaddedSize)
7346 PaddedSize = CharUnits::fromQuantity(16);
7347 assert((UnpaddedSize <= PaddedSize) && "Invalid argument size.");
7348
7349 CharUnits Padding = (PaddedSize - UnpaddedSize);
7350
7351 llvm::Type *IndexTy = CGF.Int64Ty;
7352 llvm::Value *PaddedSizeV =
7353 llvm::ConstantInt::get(IndexTy, PaddedSize.getQuantity());
7354
7355 if (IsVector) {
7356 // Work out the address of a vector argument on the stack.
7357 // Vector arguments are always passed in the high bits of a
7358 // single (8 byte) or double (16 byte) stack slot.
7359 Address OverflowArgAreaPtr =
7360 CGF.Builder.CreateStructGEP(VAListAddr, 2, "overflow_arg_area_ptr");
7361 Address OverflowArgArea =
7362 Address(CGF.Builder.CreateLoad(OverflowArgAreaPtr, "overflow_arg_area"),
7363 TyInfo.Align);
7364 Address MemAddr =
7365 CGF.Builder.CreateElementBitCast(OverflowArgArea, DirectTy, "mem_addr");
7366
7367     // Update the overflow_arg_area pointer.
7368 llvm::Value *NewOverflowArgArea =
7369 CGF.Builder.CreateGEP(OverflowArgArea.getPointer(), PaddedSizeV,
7370 "overflow_arg_area");
7371 CGF.Builder.CreateStore(NewOverflowArgArea, OverflowArgAreaPtr);
7372
7373 return MemAddr;
7374 }
7375
7376 assert(PaddedSize.getQuantity() == 8);
7377
7378 unsigned MaxRegs, RegCountField, RegSaveIndex;
7379 CharUnits RegPadding;
7380 if (InFPRs) {
7381 MaxRegs = 4; // Maximum of 4 FPR arguments
7382 RegCountField = 1; // __fpr
7383 RegSaveIndex = 16; // save offset for f0
7384 RegPadding = CharUnits(); // floats are passed in the high bits of an FPR
7385 } else {
7386 MaxRegs = 5; // Maximum of 5 GPR arguments
7387 RegCountField = 0; // __gpr
7388 RegSaveIndex = 2; // save offset for r2
7389 RegPadding = Padding; // values are passed in the low bits of a GPR
7390 }
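  // Worked example (editor's sketch): for an 8-byte GPR argument when two
  // register arguments have already been consumed (RegCount == 2), the slot
  // address is reg_save_area + 2*8 (save offset of r2) + 2*8 = reg_save_area
  // + 32, i.e. the save slot of r4.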
7391
7392 Address RegCountPtr =
7393 CGF.Builder.CreateStructGEP(VAListAddr, RegCountField, "reg_count_ptr");
7394 llvm::Value *RegCount = CGF.Builder.CreateLoad(RegCountPtr, "reg_count");
7395 llvm::Value *MaxRegsV = llvm::ConstantInt::get(IndexTy, MaxRegs);
7396 llvm::Value *InRegs = CGF.Builder.CreateICmpULT(RegCount, MaxRegsV,
7397 "fits_in_regs");
7398
7399 llvm::BasicBlock *InRegBlock = CGF.createBasicBlock("vaarg.in_reg");
7400 llvm::BasicBlock *InMemBlock = CGF.createBasicBlock("vaarg.in_mem");
7401 llvm::BasicBlock *ContBlock = CGF.createBasicBlock("vaarg.end");
7402 CGF.Builder.CreateCondBr(InRegs, InRegBlock, InMemBlock);
7403
7404 // Emit code to load the value if it was passed in registers.
7405 CGF.EmitBlock(InRegBlock);
7406
7407 // Work out the address of an argument register.
7408 llvm::Value *ScaledRegCount =
7409 CGF.Builder.CreateMul(RegCount, PaddedSizeV, "scaled_reg_count");
7410 llvm::Value *RegBase =
7411 llvm::ConstantInt::get(IndexTy, RegSaveIndex * PaddedSize.getQuantity()
7412 + RegPadding.getQuantity());
7413 llvm::Value *RegOffset =
7414 CGF.Builder.CreateAdd(ScaledRegCount, RegBase, "reg_offset");
7415 Address RegSaveAreaPtr =
7416 CGF.Builder.CreateStructGEP(VAListAddr, 3, "reg_save_area_ptr");
7417 llvm::Value *RegSaveArea =
7418 CGF.Builder.CreateLoad(RegSaveAreaPtr, "reg_save_area");
7419 Address RawRegAddr(CGF.Builder.CreateGEP(RegSaveArea, RegOffset,
7420 "raw_reg_addr"),
7421 PaddedSize);
7422 Address RegAddr =
7423 CGF.Builder.CreateElementBitCast(RawRegAddr, DirectTy, "reg_addr");
7424
7425 // Update the register count
7426 llvm::Value *One = llvm::ConstantInt::get(IndexTy, 1);
7427 llvm::Value *NewRegCount =
7428 CGF.Builder.CreateAdd(RegCount, One, "reg_count");
7429 CGF.Builder.CreateStore(NewRegCount, RegCountPtr);
7430 CGF.EmitBranch(ContBlock);
7431
7432 // Emit code to load the value if it was passed in memory.
7433 CGF.EmitBlock(InMemBlock);
7434
7435 // Work out the address of a stack argument.
7436 Address OverflowArgAreaPtr =
7437 CGF.Builder.CreateStructGEP(VAListAddr, 2, "overflow_arg_area_ptr");
7438 Address OverflowArgArea =
7439 Address(CGF.Builder.CreateLoad(OverflowArgAreaPtr, "overflow_arg_area"),
7440 PaddedSize);
7441 Address RawMemAddr =
7442 CGF.Builder.CreateConstByteGEP(OverflowArgArea, Padding, "raw_mem_addr");
7443 Address MemAddr =
7444 CGF.Builder.CreateElementBitCast(RawMemAddr, DirectTy, "mem_addr");
7445
7446   // Update the overflow_arg_area pointer.
7447 llvm::Value *NewOverflowArgArea =
7448 CGF.Builder.CreateGEP(OverflowArgArea.getPointer(), PaddedSizeV,
7449 "overflow_arg_area");
7450 CGF.Builder.CreateStore(NewOverflowArgArea, OverflowArgAreaPtr);
7451 CGF.EmitBranch(ContBlock);
7452
7453 // Return the appropriate result.
7454 CGF.EmitBlock(ContBlock);
7455 Address ResAddr = emitMergePHI(CGF, RegAddr, InRegBlock,
7456 MemAddr, InMemBlock, "va_arg.addr");
7457
7458 if (IsIndirect)
7459 ResAddr = Address(CGF.Builder.CreateLoad(ResAddr, "indirect_arg"),
7460 TyInfo.Align);
7461
7462 return ResAddr;
7463 }
7464
7465 ABIArgInfo SystemZABIInfo::classifyReturnType(QualType RetTy) const {
7466 if (RetTy->isVoidType())
7467 return ABIArgInfo::getIgnore();
7468 if (isVectorArgumentType(RetTy))
7469 return ABIArgInfo::getDirect();
7470 if (isCompoundType(RetTy) || getContext().getTypeSize(RetTy) > 64)
7471 return getNaturalAlignIndirect(RetTy);
7472 return (isPromotableIntegerTypeForABI(RetTy) ? ABIArgInfo::getExtend(RetTy)
7473 : ABIArgInfo::getDirect());
7474 }
7475
7476 ABIArgInfo SystemZABIInfo::classifyArgumentType(QualType Ty) const {
7477 // Handle the generic C++ ABI.
7478 if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, getCXXABI()))
7479 return getNaturalAlignIndirect(Ty, RAA == CGCXXABI::RAA_DirectInMemory);
7480
7481 // Integers and enums are extended to full register width.
7482 if (isPromotableIntegerTypeForABI(Ty))
7483 return ABIArgInfo::getExtend(Ty);
7484
7485 // Handle vector types and vector-like structure types. Note that
7486 // as opposed to float-like structure types, we do not allow any
7487 // padding for vector-like structures, so verify the sizes match.
7488 uint64_t Size = getContext().getTypeSize(Ty);
7489 QualType SingleElementTy = GetSingleElementType(Ty);
7490 if (isVectorArgumentType(SingleElementTy) &&
7491 getContext().getTypeSize(SingleElementTy) == Size)
7492 return ABIArgInfo::getDirect(CGT.ConvertType(SingleElementTy));
7493
7494 // Values that are not 1, 2, 4 or 8 bytes in size are passed indirectly.
7495 if (Size != 8 && Size != 16 && Size != 32 && Size != 64)
7496 return getNaturalAlignIndirect(Ty, /*ByVal=*/false);
7497
7498 // Handle small structures.
7499 if (const RecordType *RT = Ty->getAs<RecordType>()) {
7500     // Structures with flexible array members have variable length, so they
7501     // really fail the size test above.
7502 const RecordDecl *RD = RT->getDecl();
7503 if (RD->hasFlexibleArrayMember())
7504 return getNaturalAlignIndirect(Ty, /*ByVal=*/false);
7505
7506 // The structure is passed as an unextended integer, a float, or a double.
7507 llvm::Type *PassTy;
7508 if (isFPArgumentType(SingleElementTy)) {
7509 assert(Size == 32 || Size == 64);
7510 if (Size == 32)
7511 PassTy = llvm::Type::getFloatTy(getVMContext());
7512 else
7513 PassTy = llvm::Type::getDoubleTy(getVMContext());
7514 } else
7515 PassTy = llvm::IntegerType::get(getVMContext(), Size);
7516 return ABIArgInfo::getDirect(PassTy);
7517 }
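  // Example (editor's sketch): 'struct S { float f; };' has Size == 32 and a
  // single FP element, so it is passed directly as 'float'; a two-int struct
  // of Size == 64 has no single FP element and is passed as the unextended
  // integer i64 instead.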
7518
7519 // Non-structure compounds are passed indirectly.
7520 if (isCompoundType(Ty))
7521 return getNaturalAlignIndirect(Ty, /*ByVal=*/false);
7522
7523 return ABIArgInfo::getDirect(nullptr);
7524 }
7525
7526 //===----------------------------------------------------------------------===//
7527 // MSP430 ABI Implementation
7528 //===----------------------------------------------------------------------===//
7529
7530 namespace {
7531
7532 class MSP430ABIInfo : public DefaultABIInfo {
7533   static ABIArgInfo complexArgInfo() {
7534 ABIArgInfo Info = ABIArgInfo::getDirect();
7535 Info.setCanBeFlattened(false);
7536 return Info;
7537 }
7538
7539 public:
7540   MSP430ABIInfo(CodeGenTypes &CGT) : DefaultABIInfo(CGT) {}
7541
7542   ABIArgInfo classifyReturnType(QualType RetTy) const {
7543 if (RetTy->isAnyComplexType())
7544 return complexArgInfo();
7545
7546 return DefaultABIInfo::classifyReturnType(RetTy);
7547 }
7548
7549   ABIArgInfo classifyArgumentType(QualType RetTy) const {
7550 if (RetTy->isAnyComplexType())
7551 return complexArgInfo();
7552
7553 return DefaultABIInfo::classifyArgumentType(RetTy);
7554 }
7555
7556   // Just copy the original implementations, because
7557   // DefaultABIInfo::classify{Return,Argument}Type() are not virtual.
7558   void computeInfo(CGFunctionInfo &FI) const override {
7559 if (!getCXXABI().classifyReturnType(FI))
7560 FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
7561 for (auto &I : FI.arguments())
7562 I.info = classifyArgumentType(I.type);
7563 }
7564
7565   Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
7566 QualType Ty) const override {
7567 return EmitVAArgInstr(CGF, VAListAddr, Ty, classifyArgumentType(Ty));
7568 }
7569 };
7570
7571 class MSP430TargetCodeGenInfo : public TargetCodeGenInfo {
7572 public:
7573   MSP430TargetCodeGenInfo(CodeGenTypes &CGT)
7574 : TargetCodeGenInfo(std::make_unique<MSP430ABIInfo>(CGT)) {}
7575 void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
7576 CodeGen::CodeGenModule &M) const override;
7577 };
7578
7579 }
7580
7581 void MSP430TargetCodeGenInfo::setTargetAttributes(
7582 const Decl *D, llvm::GlobalValue *GV, CodeGen::CodeGenModule &M) const {
7583 if (GV->isDeclaration())
7584 return;
7585 if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D)) {
7586 const auto *InterruptAttr = FD->getAttr<MSP430InterruptAttr>();
7587 if (!InterruptAttr)
7588 return;
7589
7590 // Handle 'interrupt' attribute:
7591 llvm::Function *F = cast<llvm::Function>(GV);
7592
7593 // Step 1: Set ISR calling convention.
7594 F->setCallingConv(llvm::CallingConv::MSP430_INTR);
7595
7596     // Step 2: Add the required function attributes.
7597 F->addFnAttr(llvm::Attribute::NoInline);
7598 F->addFnAttr("interrupt", llvm::utostr(InterruptAttr->getNumber()));
7599 }
7600 }
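// Illustrative example for the handling above (editor's sketch; the C source
// is assumed, not taken from this file):
//
//   void __attribute__((interrupt(2))) isr(void) { ... }
//
// gives 'isr' the MSP430_INTR calling convention, the noinline attribute,
// and the string attribute "interrupt"="2".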
7601
7602 //===----------------------------------------------------------------------===//
7603 // MIPS ABI Implementation. This works for both little-endian and
7604 // big-endian variants.
7605 //===----------------------------------------------------------------------===//
7606
7607 namespace {
7608 class MipsABIInfo : public ABIInfo {
7609 bool IsO32;
7610 unsigned MinABIStackAlignInBytes, StackAlignInBytes;
7611 void CoerceToIntArgs(uint64_t TySize,
7612 SmallVectorImpl<llvm::Type *> &ArgList) const;
7613 llvm::Type* HandleAggregates(QualType Ty, uint64_t TySize) const;
7614 llvm::Type* returnAggregateInRegs(QualType RetTy, uint64_t Size) const;
7615 llvm::Type* getPaddingType(uint64_t Align, uint64_t Offset) const;
7616 public:
7617   MipsABIInfo(CodeGenTypes &CGT, bool _IsO32) :
7618 ABIInfo(CGT), IsO32(_IsO32), MinABIStackAlignInBytes(IsO32 ? 4 : 8),
7619 StackAlignInBytes(IsO32 ? 8 : 16) {}
7620
7621 ABIArgInfo classifyReturnType(QualType RetTy) const;
7622 ABIArgInfo classifyArgumentType(QualType RetTy, uint64_t &Offset) const;
7623 void computeInfo(CGFunctionInfo &FI) const override;
7624 Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
7625 QualType Ty) const override;
7626 ABIArgInfo extendType(QualType Ty) const;
7627 };
7628
7629 class MIPSTargetCodeGenInfo : public TargetCodeGenInfo {
7630 unsigned SizeOfUnwindException;
7631 public:
7632   MIPSTargetCodeGenInfo(CodeGenTypes &CGT, bool IsO32)
7633 : TargetCodeGenInfo(std::make_unique<MipsABIInfo>(CGT, IsO32)),
7634 SizeOfUnwindException(IsO32 ? 24 : 32) {}
7635
7636   int getDwarfEHStackPointer(CodeGen::CodeGenModule &CGM) const override {
7637 return 29;
7638 }
7639
7640   void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
7641 CodeGen::CodeGenModule &CGM) const override {
7642 const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D);
7643 if (!FD) return;
7644 llvm::Function *Fn = cast<llvm::Function>(GV);
7645
7646 if (FD->hasAttr<MipsLongCallAttr>())
7647 Fn->addFnAttr("long-call");
7648 else if (FD->hasAttr<MipsShortCallAttr>())
7649 Fn->addFnAttr("short-call");
7650
7651 // Other attributes do not have a meaning for declarations.
7652 if (GV->isDeclaration())
7653 return;
7654
7655 if (FD->hasAttr<Mips16Attr>()) {
7656 Fn->addFnAttr("mips16");
7657 }
7658 else if (FD->hasAttr<NoMips16Attr>()) {
7659 Fn->addFnAttr("nomips16");
7660 }
7661
7662 if (FD->hasAttr<MicroMipsAttr>())
7663 Fn->addFnAttr("micromips");
7664 else if (FD->hasAttr<NoMicroMipsAttr>())
7665 Fn->addFnAttr("nomicromips");
7666
7667 const MipsInterruptAttr *Attr = FD->getAttr<MipsInterruptAttr>();
7668 if (!Attr)
7669 return;
7670
7671 const char *Kind;
7672 switch (Attr->getInterrupt()) {
7673 case MipsInterruptAttr::eic: Kind = "eic"; break;
7674 case MipsInterruptAttr::sw0: Kind = "sw0"; break;
7675 case MipsInterruptAttr::sw1: Kind = "sw1"; break;
7676 case MipsInterruptAttr::hw0: Kind = "hw0"; break;
7677 case MipsInterruptAttr::hw1: Kind = "hw1"; break;
7678 case MipsInterruptAttr::hw2: Kind = "hw2"; break;
7679 case MipsInterruptAttr::hw3: Kind = "hw3"; break;
7680 case MipsInterruptAttr::hw4: Kind = "hw4"; break;
7681 case MipsInterruptAttr::hw5: Kind = "hw5"; break;
7682 }
7683
7684 Fn->addFnAttr("interrupt", Kind);
7685
7686 }
7687
7688 bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
7689 llvm::Value *Address) const override;
7690
7691   unsigned getSizeOfUnwindException() const override {
7692 return SizeOfUnwindException;
7693 }
7694 };
7695 }
7696
7697 void MipsABIInfo::CoerceToIntArgs(
7698 uint64_t TySize, SmallVectorImpl<llvm::Type *> &ArgList) const {
7699 llvm::IntegerType *IntTy =
7700 llvm::IntegerType::get(getVMContext(), MinABIStackAlignInBytes * 8);
7701
7702 // Add (TySize / MinABIStackAlignInBytes) args of IntTy.
7703 for (unsigned N = TySize / (MinABIStackAlignInBytes * 8); N; --N)
7704 ArgList.push_back(IntTy);
7705
7706 // If necessary, add one more integer type to ArgList.
7707 unsigned R = TySize % (MinABIStackAlignInBytes * 8);
7708
7709 if (R)
7710 ArgList.push_back(llvm::IntegerType::get(getVMContext(), R));
7711 }
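// Example (editor's sketch): under O32 (MinABIStackAlignInBytes == 4), a
// TySize of 72 bits produces two i32 arguments plus a trailing i8 for the
// 8-bit remainder; under N32/N64 (8-byte slots) the same size produces one
// i64 plus an i8.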
7712
7713 // In N32/64, an aligned double precision floating point field is passed in
7714 // a register.
7715 llvm::Type* MipsABIInfo::HandleAggregates(QualType Ty, uint64_t TySize) const {
7716 SmallVector<llvm::Type*, 8> ArgList, IntArgList;
7717
7718 if (IsO32) {
7719 CoerceToIntArgs(TySize, ArgList);
7720 return llvm::StructType::get(getVMContext(), ArgList);
7721 }
7722
7723 if (Ty->isComplexType())
7724 return CGT.ConvertType(Ty);
7725
7726 const RecordType *RT = Ty->getAs<RecordType>();
7727
7728 // Unions/vectors are passed in integer registers.
7729 if (!RT || !RT->isStructureOrClassType()) {
7730 CoerceToIntArgs(TySize, ArgList);
7731 return llvm::StructType::get(getVMContext(), ArgList);
7732 }
7733
7734 const RecordDecl *RD = RT->getDecl();
7735 const ASTRecordLayout &Layout = getContext().getASTRecordLayout(RD);
7736 assert(!(TySize % 8) && "Size of structure must be multiple of 8.");
7737
7738 uint64_t LastOffset = 0;
7739 unsigned idx = 0;
7740 llvm::IntegerType *I64 = llvm::IntegerType::get(getVMContext(), 64);
7741
7742 // Iterate over fields in the struct/class and check if there are any aligned
7743 // double fields.
7744 for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
7745 i != e; ++i, ++idx) {
7746 const QualType Ty = i->getType();
7747 const BuiltinType *BT = Ty->getAs<BuiltinType>();
7748
7749 if (!BT || BT->getKind() != BuiltinType::Double)
7750 continue;
7751
7752 uint64_t Offset = Layout.getFieldOffset(idx);
7753 if (Offset % 64) // Ignore doubles that are not aligned.
7754 continue;
7755
7756 // Add ((Offset - LastOffset) / 64) args of type i64.
7757 for (unsigned j = (Offset - LastOffset) / 64; j > 0; --j)
7758 ArgList.push_back(I64);
7759
7760 // Add double type.
7761 ArgList.push_back(llvm::Type::getDoubleTy(getVMContext()));
7762 LastOffset = Offset + 64;
7763 }
7764
7765 CoerceToIntArgs(TySize - LastOffset, IntArgList);
7766 ArgList.append(IntArgList.begin(), IntArgList.end());
7767
7768 return llvm::StructType::get(getVMContext(), ArgList);
7769 }
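// Example (editor's sketch): under N32/N64, 'struct { double d; long l; }'
// (TySize == 128, with the double 64-bit aligned at offset 0) is lowered to
// the coercion type '{ double, i64 }', letting the double travel in an FPR
// while the remaining 64 bits go in an integer register.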
7770
7771 llvm::Type *MipsABIInfo::getPaddingType(uint64_t OrigOffset,
7772 uint64_t Offset) const {
7773 if (OrigOffset + MinABIStackAlignInBytes > Offset)
7774 return nullptr;
7775
7776 return llvm::IntegerType::get(getVMContext(), (Offset - OrigOffset) * 8);
7777 }
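// Example (editor's sketch): under N64 (MinABIStackAlignInBytes == 8), an
// argument with 16-byte alignment arriving at OrigOffset == 8 is placed at
// Offset == 16; since 8 + 8 is not greater than 16, the 8 skipped bytes are
// materialized as an i64 padding type.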
7778
7779 ABIArgInfo
7780 MipsABIInfo::classifyArgumentType(QualType Ty, uint64_t &Offset) const {
7781 Ty = useFirstFieldIfTransparentUnion(Ty);
7782
7783 uint64_t OrigOffset = Offset;
7784 uint64_t TySize = getContext().getTypeSize(Ty);
7785 uint64_t Align = getContext().getTypeAlign(Ty) / 8;
7786
7787 Align = std::min(std::max(Align, (uint64_t)MinABIStackAlignInBytes),
7788 (uint64_t)StackAlignInBytes);
7789 unsigned CurrOffset = llvm::alignTo(Offset, Align);
7790 Offset = CurrOffset + llvm::alignTo(TySize, Align * 8) / 8;
7791
7792 if (isAggregateTypeForABI(Ty) || Ty->isVectorType()) {
7793 // Ignore empty aggregates.
7794 if (TySize == 0)
7795 return ABIArgInfo::getIgnore();
7796
7797 if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, getCXXABI())) {
7798 Offset = OrigOffset + MinABIStackAlignInBytes;
7799 return getNaturalAlignIndirect(Ty, RAA == CGCXXABI::RAA_DirectInMemory);
7800 }
7801
7802 // If we have reached here, aggregates are passed directly by coercing to
7803 // another structure type. Padding is inserted if the offset of the
7804 // aggregate is unaligned.
7805 ABIArgInfo ArgInfo =
7806 ABIArgInfo::getDirect(HandleAggregates(Ty, TySize), 0,
7807 getPaddingType(OrigOffset, CurrOffset));
7808 ArgInfo.setInReg(true);
7809 return ArgInfo;
7810 }
7811
7812 // Treat an enum type as its underlying type.
7813 if (const EnumType *EnumTy = Ty->getAs<EnumType>())
7814 Ty = EnumTy->getDecl()->getIntegerType();
7815
7816 // Make sure we pass indirectly things that are too large.
7817 if (const auto *EIT = Ty->getAs<ExtIntType>())
7818 if (EIT->getNumBits() > 128 ||
7819 (EIT->getNumBits() > 64 &&
7820 !getContext().getTargetInfo().hasInt128Type()))
7821 return getNaturalAlignIndirect(Ty);
7822
7823 // All integral types are promoted to the GPR width.
7824 if (Ty->isIntegralOrEnumerationType())
7825 return extendType(Ty);
7826
7827 return ABIArgInfo::getDirect(
7828 nullptr, 0, IsO32 ? nullptr : getPaddingType(OrigOffset, CurrOffset));
7829 }
7830
7831 llvm::Type*
7832 MipsABIInfo::returnAggregateInRegs(QualType RetTy, uint64_t Size) const {
7833 const RecordType *RT = RetTy->getAs<RecordType>();
7834 SmallVector<llvm::Type*, 8> RTList;
7835
7836 if (RT && RT->isStructureOrClassType()) {
7837 const RecordDecl *RD = RT->getDecl();
7838 const ASTRecordLayout &Layout = getContext().getASTRecordLayout(RD);
7839 unsigned FieldCnt = Layout.getFieldCount();
7840
7841 // N32/64 returns struct/classes in floating point registers if the
7842 // following conditions are met:
7843 // 1. The size of the struct/class is no larger than 128-bit.
7844 // 2. The struct/class has one or two fields all of which are floating
7845 // point types.
7846 // 3. The offset of the first field is zero (this follows what gcc does).
7847 //
7848 // Any other composite results are returned in integer registers.
7849 //
7850 if (FieldCnt && (FieldCnt <= 2) && !Layout.getFieldOffset(0)) {
7851 RecordDecl::field_iterator b = RD->field_begin(), e = RD->field_end();
7852 for (; b != e; ++b) {
7853 const BuiltinType *BT = b->getType()->getAs<BuiltinType>();
7854
7855 if (!BT || !BT->isFloatingPoint())
7856 break;
7857
7858 RTList.push_back(CGT.ConvertType(b->getType()));
7859 }
7860
7861 if (b == e)
7862 return llvm::StructType::get(getVMContext(), RTList,
7863 RD->hasAttr<PackedAttr>());
7864
7865 RTList.clear();
7866 }
7867 }
7868
7869 CoerceToIntArgs(Size, RTList);
7870 return llvm::StructType::get(getVMContext(), RTList);
7871 }
7872
7873 ABIArgInfo MipsABIInfo::classifyReturnType(QualType RetTy) const {
7874 uint64_t Size = getContext().getTypeSize(RetTy);
7875
7876 if (RetTy->isVoidType())
7877 return ABIArgInfo::getIgnore();
7878
7879 // O32 doesn't treat zero-sized structs differently from other structs.
7880   // However, N32/N64 ignores zero-sized return values.
7881 if (!IsO32 && Size == 0)
7882 return ABIArgInfo::getIgnore();
7883
7884 if (isAggregateTypeForABI(RetTy) || RetTy->isVectorType()) {
7885 if (Size <= 128) {
7886 if (RetTy->isAnyComplexType())
7887 return ABIArgInfo::getDirect();
7888
7889 // O32 returns integer vectors in registers and N32/N64 returns all small
7890 // aggregates in registers.
7891 if (!IsO32 ||
7892 (RetTy->isVectorType() && !RetTy->hasFloatingRepresentation())) {
7893 ABIArgInfo ArgInfo =
7894 ABIArgInfo::getDirect(returnAggregateInRegs(RetTy, Size));
7895 ArgInfo.setInReg(true);
7896 return ArgInfo;
7897 }
7898 }
7899
7900 return getNaturalAlignIndirect(RetTy);
7901 }
7902
7903 // Treat an enum type as its underlying type.
7904 if (const EnumType *EnumTy = RetTy->getAs<EnumType>())
7905 RetTy = EnumTy->getDecl()->getIntegerType();
7906
7907 // Make sure we pass indirectly things that are too large.
7908 if (const auto *EIT = RetTy->getAs<ExtIntType>())
7909 if (EIT->getNumBits() > 128 ||
7910 (EIT->getNumBits() > 64 &&
7911 !getContext().getTargetInfo().hasInt128Type()))
7912 return getNaturalAlignIndirect(RetTy);
7913
7914 if (isPromotableIntegerTypeForABI(RetTy))
7915 return ABIArgInfo::getExtend(RetTy);
7916
7917 if ((RetTy->isUnsignedIntegerOrEnumerationType() ||
7918 RetTy->isSignedIntegerOrEnumerationType()) && Size == 32 && !IsO32)
7919 return ABIArgInfo::getSignExtend(RetTy);
7920
7921 return ABIArgInfo::getDirect();
7922 }
7923
7924 void MipsABIInfo::computeInfo(CGFunctionInfo &FI) const {
7925 ABIArgInfo &RetInfo = FI.getReturnInfo();
7926 if (!getCXXABI().classifyReturnType(FI))
7927 RetInfo = classifyReturnType(FI.getReturnType());
7928
7929 // Check if a pointer to an aggregate is passed as a hidden argument.
7930 uint64_t Offset = RetInfo.isIndirect() ? MinABIStackAlignInBytes : 0;
7931
7932 for (auto &I : FI.arguments())
7933 I.info = classifyArgumentType(I.type, Offset);
7934 }
7935
7936 Address MipsABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
7937 QualType OrigTy) const {
7938 QualType Ty = OrigTy;
7939
7940 // Integer arguments are promoted to 32-bit on O32 and 64-bit on N32/N64.
7941 // Pointers are also promoted in the same way but this only matters for N32.
7942 unsigned SlotSizeInBits = IsO32 ? 32 : 64;
7943 unsigned PtrWidth = getTarget().getPointerWidth(0);
7944 bool DidPromote = false;
7945 if ((Ty->isIntegerType() &&
7946 getContext().getIntWidth(Ty) < SlotSizeInBits) ||
7947 (Ty->isPointerType() && PtrWidth < SlotSizeInBits)) {
7948 DidPromote = true;
7949 Ty = getContext().getIntTypeForBitwidth(SlotSizeInBits,
7950 Ty->isSignedIntegerType());
7951 }
7952
7953 auto TyInfo = getContext().getTypeInfoInChars(Ty);
7954
7955 // The alignment of things in the argument area is never larger than
7956 // StackAlignInBytes.
7957 TyInfo.Align =
7958 std::min(TyInfo.Align, CharUnits::fromQuantity(StackAlignInBytes));
7959
7960 // MinABIStackAlignInBytes is the size of argument slots on the stack.
7961 CharUnits ArgSlotSize = CharUnits::fromQuantity(MinABIStackAlignInBytes);
7962
7963 Address Addr = emitVoidPtrVAArg(CGF, VAListAddr, Ty, /*indirect*/ false,
7964 TyInfo, ArgSlotSize, /*AllowHigherAlign*/ true);
7965
7966
7967 // If there was a promotion, "unpromote" into a temporary.
7968 // TODO: can we just use a pointer into a subset of the original slot?
7969 if (DidPromote) {
7970 Address Temp = CGF.CreateMemTemp(OrigTy, "vaarg.promotion-temp");
7971 llvm::Value *Promoted = CGF.Builder.CreateLoad(Addr);
7972
7973 // Truncate down to the right width.
7974 llvm::Type *IntTy = (OrigTy->isIntegerType() ? Temp.getElementType()
7975 : CGF.IntPtrTy);
7976 llvm::Value *V = CGF.Builder.CreateTrunc(Promoted, IntTy);
7977 if (OrigTy->isPointerType())
7978 V = CGF.Builder.CreateIntToPtr(V, Temp.getElementType());
7979
7980 CGF.Builder.CreateStore(V, Temp);
7981 Addr = Temp;
7982 }
7983
7984 return Addr;
7985 }
7986
7987 ABIArgInfo MipsABIInfo::extendType(QualType Ty) const {
7988 int TySize = getContext().getTypeSize(Ty);
7989
7990 // MIPS64 ABI requires unsigned 32 bit integers to be sign extended.
7991 if (Ty->isUnsignedIntegerOrEnumerationType() && TySize == 32)
7992 return ABIArgInfo::getSignExtend(Ty);
7993
7994 return ABIArgInfo::getExtend(Ty);
7995 }
7996
7997 bool
7998 MIPSTargetCodeGenInfo::initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
7999 llvm::Value *Address) const {
8000   // This information comes from gcc's implementation, which seems to be
8001   // as canonical as it gets.
8002
8003 // Everything on MIPS is 4 bytes. Double-precision FP registers
8004 // are aliased to pairs of single-precision FP registers.
8005 llvm::Value *Four8 = llvm::ConstantInt::get(CGF.Int8Ty, 4);
8006
8007 // 0-31 are the general purpose registers, $0 - $31.
8008 // 32-63 are the floating-point registers, $f0 - $f31.
8009 // 64 and 65 are the multiply/divide registers, $hi and $lo.
8010 // 66 is the (notional, I think) register for signal-handler return.
8011 AssignToArrayRange(CGF.Builder, Address, Four8, 0, 65);
8012
8013 // 67-74 are the floating-point status registers, $fcc0 - $fcc7.
8014 // They are one bit wide and ignored here.
8015
8016 // 80-111 are the coprocessor 0 registers, $c0r0 - $c0r31.
8017 // (coprocessor 1 is the FP unit)
8018 // 112-143 are the coprocessor 2 registers, $c2r0 - $c2r31.
8019 // 144-175 are the coprocessor 3 registers, $c3r0 - $c3r31.
8020 // 176-181 are the DSP accumulator registers.
8021 AssignToArrayRange(CGF.Builder, Address, Four8, 80, 181);
8022 return false;
8023 }
8024
8025 //===----------------------------------------------------------------------===//
8026 // AVR ABI Implementation.
8027 //===----------------------------------------------------------------------===//
8028
8029 namespace {
8030 class AVRTargetCodeGenInfo : public TargetCodeGenInfo {
8031 public:
8032   AVRTargetCodeGenInfo(CodeGenTypes &CGT)
8033 : TargetCodeGenInfo(std::make_unique<DefaultABIInfo>(CGT)) {}
8034
8035   void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
8036 CodeGen::CodeGenModule &CGM) const override {
8037 if (GV->isDeclaration())
8038 return;
8039 const auto *FD = dyn_cast_or_null<FunctionDecl>(D);
8040 if (!FD) return;
8041 auto *Fn = cast<llvm::Function>(GV);
8042
8043 if (FD->getAttr<AVRInterruptAttr>())
8044 Fn->addFnAttr("interrupt");
8045
8046 if (FD->getAttr<AVRSignalAttr>())
8047 Fn->addFnAttr("signal");
8048 }
8049 };
8050 }
8051
8052 //===----------------------------------------------------------------------===//
8053 // TCE ABI Implementation (see http://tce.cs.tut.fi). Uses mostly the defaults.
8054 // Currently subclassed only to implement custom OpenCL C function attribute
8055 // handling.
8056 //===----------------------------------------------------------------------===//
8057
8058 namespace {
8059
8060 class TCETargetCodeGenInfo : public DefaultTargetCodeGenInfo {
8061 public:
8062   TCETargetCodeGenInfo(CodeGenTypes &CGT)
8063 : DefaultTargetCodeGenInfo(CGT) {}
8064
8065 void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
8066 CodeGen::CodeGenModule &M) const override;
8067 };
8068
8069 void TCETargetCodeGenInfo::setTargetAttributes(
8070 const Decl *D, llvm::GlobalValue *GV, CodeGen::CodeGenModule &M) const {
8071 if (GV->isDeclaration())
8072 return;
8073 const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D);
8074 if (!FD) return;
8075
8076 llvm::Function *F = cast<llvm::Function>(GV);
8077
8078 if (M.getLangOpts().OpenCL) {
8079 if (FD->hasAttr<OpenCLKernelAttr>()) {
8080 // OpenCL C Kernel functions are not subject to inlining
8081 F->addFnAttr(llvm::Attribute::NoInline);
8082 const ReqdWorkGroupSizeAttr *Attr = FD->getAttr<ReqdWorkGroupSizeAttr>();
8083 if (Attr) {
8084 // Convert the reqd_work_group_size() attributes to metadata.
8085 llvm::LLVMContext &Context = F->getContext();
8086 llvm::NamedMDNode *OpenCLMetadata =
8087 M.getModule().getOrInsertNamedMetadata(
8088 "opencl.kernel_wg_size_info");
8089
8090 SmallVector<llvm::Metadata *, 5> Operands;
8091 Operands.push_back(llvm::ConstantAsMetadata::get(F));
8092
8093 Operands.push_back(
8094 llvm::ConstantAsMetadata::get(llvm::Constant::getIntegerValue(
8095 M.Int32Ty, llvm::APInt(32, Attr->getXDim()))));
8096 Operands.push_back(
8097 llvm::ConstantAsMetadata::get(llvm::Constant::getIntegerValue(
8098 M.Int32Ty, llvm::APInt(32, Attr->getYDim()))));
8099 Operands.push_back(
8100 llvm::ConstantAsMetadata::get(llvm::Constant::getIntegerValue(
8101 M.Int32Ty, llvm::APInt(32, Attr->getZDim()))));
8102
8103 // Add a boolean constant operand for "required" (true) or "hint"
8104 // (false) for implementing the work_group_size_hint attr later.
8105 // Currently always true as the hint is not yet implemented.
8106 Operands.push_back(
8107 llvm::ConstantAsMetadata::get(llvm::ConstantInt::getTrue(Context)));
8108 OpenCLMetadata->addOperand(llvm::MDNode::get(Context, Operands));
8109 }
8110 }
8111 }
8112 }
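// Illustrative example for the handling above (editor's sketch; the OpenCL
// source is assumed, not taken from this file):
//
//   __kernel __attribute__((reqd_work_group_size(64, 1, 1)))
//   void k(...) { ... }
//
// appends to !opencl.kernel_wg_size_info an operand of roughly the form
//   !{void (...)* @k, i32 64, i32 1, i32 1, i1 true}
// where the trailing i1 true marks the size as required rather than a hint.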
8113
8114 }
8115
8116 //===----------------------------------------------------------------------===//
8117 // Hexagon ABI Implementation
8118 //===----------------------------------------------------------------------===//
8119
8120 namespace {
8121
8122 class HexagonABIInfo : public DefaultABIInfo {
8123 public:
8124   HexagonABIInfo(CodeGenTypes &CGT) : DefaultABIInfo(CGT) {}
8125
8126 private:
8127 ABIArgInfo classifyReturnType(QualType RetTy) const;
8128 ABIArgInfo classifyArgumentType(QualType RetTy) const;
8129 ABIArgInfo classifyArgumentType(QualType RetTy, unsigned *RegsLeft) const;
8130
8131 void computeInfo(CGFunctionInfo &FI) const override;
8132
8133 Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
8134 QualType Ty) const override;
8135 Address EmitVAArgFromMemory(CodeGenFunction &CFG, Address VAListAddr,
8136 QualType Ty) const;
8137 Address EmitVAArgForHexagon(CodeGenFunction &CFG, Address VAListAddr,
8138 QualType Ty) const;
8139 Address EmitVAArgForHexagonLinux(CodeGenFunction &CFG, Address VAListAddr,
8140 QualType Ty) const;
8141 };
8142
8143 class HexagonTargetCodeGenInfo : public TargetCodeGenInfo {
8144 public:
8145   HexagonTargetCodeGenInfo(CodeGenTypes &CGT)
8146 : TargetCodeGenInfo(std::make_unique<HexagonABIInfo>(CGT)) {}
8147
8148   int getDwarfEHStackPointer(CodeGen::CodeGenModule &M) const override {
8149 return 29;
8150 }
8151
8152   void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
8153 CodeGen::CodeGenModule &GCM) const override {
8154 if (GV->isDeclaration())
8155 return;
8156 const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D);
8157 if (!FD)
8158 return;
8159 }
8160 };
8161
8162 } // namespace
8163
8164 void HexagonABIInfo::computeInfo(CGFunctionInfo &FI) const {
8165 unsigned RegsLeft = 6;
8166 if (!getCXXABI().classifyReturnType(FI))
8167 FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
8168 for (auto &I : FI.arguments())
8169 I.info = classifyArgumentType(I.type, &RegsLeft);
8170 }
8171
8172 static bool HexagonAdjustRegsLeft(uint64_t Size, unsigned *RegsLeft) {
8173 assert(Size <= 64 && "Not expecting to pass arguments larger than 64 bits"
8174 " through registers");
8175
8176 if (*RegsLeft == 0)
8177 return false;
8178
8179 if (Size <= 32) {
8180 (*RegsLeft)--;
8181 return true;
8182 }
8183
8184 if (2 <= (*RegsLeft & (~1U))) {
8185 *RegsLeft = (*RegsLeft & (~1U)) - 2;
8186 return true;
8187 }
8188
8189   // The next available register was r5, but a candidate wider than 32 bits
8190   // has to go on the stack. However, we still consume r5.
8191 if (*RegsLeft == 1)
8192 *RegsLeft = 0;
8193
8194 return false;
8195 }
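// Example (editor's sketch): a 64-bit candidate with *RegsLeft == 3 rounds
// down to an aligned register pair ((3 & ~1) == 2), consuming both registers
// of the pair and leaving *RegsLeft == 0; with *RegsLeft == 1 the last odd
// register is burned and the value still goes on the stack.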
8196
8197 ABIArgInfo HexagonABIInfo::classifyArgumentType(QualType Ty,
8198 unsigned *RegsLeft) const {
8199 if (!isAggregateTypeForABI(Ty)) {
8200 // Treat an enum type as its underlying type.
8201 if (const EnumType *EnumTy = Ty->getAs<EnumType>())
8202 Ty = EnumTy->getDecl()->getIntegerType();
8203
8204 uint64_t Size = getContext().getTypeSize(Ty);
8205 if (Size <= 64)
8206 HexagonAdjustRegsLeft(Size, RegsLeft);
8207
8208 if (Size > 64 && Ty->isExtIntType())
8209 return getNaturalAlignIndirect(Ty, /*ByVal=*/true);
8210
8211 return isPromotableIntegerTypeForABI(Ty) ? ABIArgInfo::getExtend(Ty)
8212 : ABIArgInfo::getDirect();
8213 }
8214
8215 if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, getCXXABI()))
8216 return getNaturalAlignIndirect(Ty, RAA == CGCXXABI::RAA_DirectInMemory);
8217
8218 // Ignore empty records.
8219 if (isEmptyRecord(getContext(), Ty, true))
8220 return ABIArgInfo::getIgnore();
8221
8222 uint64_t Size = getContext().getTypeSize(Ty);
8223 unsigned Align = getContext().getTypeAlign(Ty);
8224
8225 if (Size > 64)
8226 return getNaturalAlignIndirect(Ty, /*ByVal=*/true);
8227
8228 if (HexagonAdjustRegsLeft(Size, RegsLeft))
8229 Align = Size <= 32 ? 32 : 64;
8230 if (Size <= Align) {
8231 // Pass in the smallest viable integer type.
8232 if (!llvm::isPowerOf2_64(Size))
8233 Size = llvm::NextPowerOf2(Size);
8234 return ABIArgInfo::getDirect(llvm::Type::getIntNTy(getVMContext(), Size));
8235 }
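  // Example (editor's sketch): a 5-byte struct has Size == 40; Align is
  // bumped to 64 once a register pair is reserved, so 40 <= 64 holds and the
  // struct is widened to the next power of two and passed as i64.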
8236 return DefaultABIInfo::classifyArgumentType(Ty);
8237 }
8238
8239 ABIArgInfo HexagonABIInfo::classifyReturnType(QualType RetTy) const {
8240 if (RetTy->isVoidType())
8241 return ABIArgInfo::getIgnore();
8242
8243 const TargetInfo &T = CGT.getTarget();
8244 uint64_t Size = getContext().getTypeSize(RetTy);
8245
8246 if (RetTy->getAs<VectorType>()) {
8247 // HVX vectors are returned in vector registers or register pairs.
8248 if (T.hasFeature("hvx")) {
8249 assert(T.hasFeature("hvx-length64b") || T.hasFeature("hvx-length128b"));
8250 uint64_t VecSize = T.hasFeature("hvx-length64b") ? 64*8 : 128*8;
8251 if (Size == VecSize || Size == 2*VecSize)
8252 return ABIArgInfo::getDirectInReg();
8253 }
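    // Example (editor's sketch): with the hvx-length128b feature, VecSize is
    // 1024 bits, so a 1024-bit vector is returned in a single HVX register
    // and a 2048-bit vector in a register pair; other sizes fall through.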
8254 // Large vector types should be returned via memory.
8255 if (Size > 64)
8256 return getNaturalAlignIndirect(RetTy);
8257 }
8258
8259 if (!isAggregateTypeForABI(RetTy)) {
8260 // Treat an enum type as its underlying type.
8261 if (const EnumType *EnumTy = RetTy->getAs<EnumType>())
8262 RetTy = EnumTy->getDecl()->getIntegerType();
8263
8264 if (Size > 64 && RetTy->isExtIntType())
8265 return getNaturalAlignIndirect(RetTy, /*ByVal=*/false);
8266
8267 return isPromotableIntegerTypeForABI(RetTy) ? ABIArgInfo::getExtend(RetTy)
8268 : ABIArgInfo::getDirect();
8269 }
8270
8271 if (isEmptyRecord(getContext(), RetTy, true))
8272 return ABIArgInfo::getIgnore();
8273
8274 // Aggregates <= 8 bytes are returned in registers, other aggregates
8275 // are returned indirectly.
8276 if (Size <= 64) {
8277 // Return in the smallest viable integer type.
8278 if (!llvm::isPowerOf2_64(Size))
8279 Size = llvm::NextPowerOf2(Size);
8280 return ABIArgInfo::getDirect(llvm::Type::getIntNTy(getVMContext(), Size));
8281 }
8282 return getNaturalAlignIndirect(RetTy, /*ByVal=*/true);
8283 }
8284
8285 Address HexagonABIInfo::EmitVAArgFromMemory(CodeGenFunction &CGF,
8286 Address VAListAddr,
8287 QualType Ty) const {
8288 // Load the overflow area pointer.
8289 Address __overflow_area_pointer_p =
8290 CGF.Builder.CreateStructGEP(VAListAddr, 2, "__overflow_area_pointer_p");
8291 llvm::Value *__overflow_area_pointer = CGF.Builder.CreateLoad(
8292 __overflow_area_pointer_p, "__overflow_area_pointer");
8293
8294 uint64_t Align = CGF.getContext().getTypeAlign(Ty) / 8;
8295 if (Align > 4) {
8296 // Alignment should be a power of 2.
8297 assert((Align & (Align - 1)) == 0 && "Alignment is not power of 2!");
8298
8299 // overflow_arg_area = (overflow_arg_area + align - 1) & -align;
8300 llvm::Value *Offset = llvm::ConstantInt::get(CGF.Int64Ty, Align - 1);
8301
8302 // Add offset to the current pointer to access the argument.
8303 __overflow_area_pointer =
8304 CGF.Builder.CreateGEP(__overflow_area_pointer, Offset);
8305 llvm::Value *AsInt =
8306 CGF.Builder.CreatePtrToInt(__overflow_area_pointer, CGF.Int32Ty);
8307
8308     // Create a mask to be "AND"ed with (overflow_arg_area + align - 1).
8310 llvm::Value *Mask = llvm::ConstantInt::get(CGF.Int32Ty, -(int)Align);
8311 __overflow_area_pointer = CGF.Builder.CreateIntToPtr(
8312 CGF.Builder.CreateAnd(AsInt, Mask), __overflow_area_pointer->getType(),
8313 "__overflow_area_pointer.align");
8314 }
8315
8316 // Get the type of the argument from memory and bitcast
8317 // overflow area pointer to the argument type.
8318 llvm::Type *PTy = CGF.ConvertTypeForMem(Ty);
8319 Address AddrTyped = CGF.Builder.CreateBitCast(
8320 Address(__overflow_area_pointer, CharUnits::fromQuantity(Align)),
8321 llvm::PointerType::getUnqual(PTy));
8322
8323 // Round up to the minimum stack alignment for varargs which is 4 bytes.
8324 uint64_t Offset = llvm::alignTo(CGF.getContext().getTypeSize(Ty) / 8, 4);
8325
8326 __overflow_area_pointer = CGF.Builder.CreateGEP(
8327 __overflow_area_pointer, llvm::ConstantInt::get(CGF.Int32Ty, Offset),
8328 "__overflow_area_pointer.next");
8329 CGF.Builder.CreateStore(__overflow_area_pointer, __overflow_area_pointer_p);
8330
8331 return AddrTyped;
8332 }
8333
8334 Address HexagonABIInfo::EmitVAArgForHexagon(CodeGenFunction &CGF,
8335 Address VAListAddr,
8336 QualType Ty) const {
8337 // FIXME: Need to handle alignment
8338 llvm::Type *BP = CGF.Int8PtrTy;
8339 llvm::Type *BPP = CGF.Int8PtrPtrTy;
8340 CGBuilderTy &Builder = CGF.Builder;
8341 Address VAListAddrAsBPP = Builder.CreateBitCast(VAListAddr, BPP, "ap");
8342 llvm::Value *Addr = Builder.CreateLoad(VAListAddrAsBPP, "ap.cur");
8343 // Handle address alignment for type alignment > 32 bits
8344 uint64_t TyAlign = CGF.getContext().getTypeAlign(Ty) / 8;
8345 if (TyAlign > 4) {
8346 assert((TyAlign & (TyAlign - 1)) == 0 && "Alignment is not power of 2!");
8347 llvm::Value *AddrAsInt = Builder.CreatePtrToInt(Addr, CGF.Int32Ty);
8348 AddrAsInt = Builder.CreateAdd(AddrAsInt, Builder.getInt32(TyAlign - 1));
8349 AddrAsInt = Builder.CreateAnd(AddrAsInt, Builder.getInt32(~(TyAlign - 1)));
8350 Addr = Builder.CreateIntToPtr(AddrAsInt, BP);
8351 }
8352 llvm::Type *PTy = llvm::PointerType::getUnqual(CGF.ConvertType(Ty));
8353 Address AddrTyped = Builder.CreateBitCast(
8354 Address(Addr, CharUnits::fromQuantity(TyAlign)), PTy);
8355
8356 uint64_t Offset = llvm::alignTo(CGF.getContext().getTypeSize(Ty) / 8, 4);
8357 llvm::Value *NextAddr = Builder.CreateGEP(
8358 Addr, llvm::ConstantInt::get(CGF.Int32Ty, Offset), "ap.next");
8359 Builder.CreateStore(NextAddr, VAListAddrAsBPP);
8360
8361 return AddrTyped;
8362 }
8363
8364 Address HexagonABIInfo::EmitVAArgForHexagonLinux(CodeGenFunction &CGF,
8365 Address VAListAddr,
8366 QualType Ty) const {
8367 int ArgSize = CGF.getContext().getTypeSize(Ty) / 8;
8368
8369 if (ArgSize > 8)
8370 return EmitVAArgFromMemory(CGF, VAListAddr, Ty);
8371
8372   // Here we have to check whether the argument is in the register save area
8373   // or in the overflow area: if the current saved register area pointer plus
8374   // the argument size (rounded up to its alignment) exceeds the saved
8375   // register area end pointer, the argument is in the overflow area.
8376 unsigned RegsLeft = 6;
8377 Ty = CGF.getContext().getCanonicalType(Ty);
8378 (void)classifyArgumentType(Ty, &RegsLeft);
8379
8380 llvm::BasicBlock *MaybeRegBlock = CGF.createBasicBlock("vaarg.maybe_reg");
8381 llvm::BasicBlock *InRegBlock = CGF.createBasicBlock("vaarg.in_reg");
8382 llvm::BasicBlock *OnStackBlock = CGF.createBasicBlock("vaarg.on_stack");
8383 llvm::BasicBlock *ContBlock = CGF.createBasicBlock("vaarg.end");
8384
8385   // Get the rounded size of the argument. GCC does not allow varargs of
8386   // size < 4 bytes; we follow the same logic here.
8387 ArgSize = (CGF.getContext().getTypeSize(Ty) <= 32) ? 4 : 8;
8388 int ArgAlign = (CGF.getContext().getTypeSize(Ty) <= 32) ? 4 : 8;
8389
8390 // Argument may be in saved register area
8391 CGF.EmitBlock(MaybeRegBlock);
8392
8393 // Load the current saved register area pointer.
8394 Address __current_saved_reg_area_pointer_p = CGF.Builder.CreateStructGEP(
8395 VAListAddr, 0, "__current_saved_reg_area_pointer_p");
8396 llvm::Value *__current_saved_reg_area_pointer = CGF.Builder.CreateLoad(
8397 __current_saved_reg_area_pointer_p, "__current_saved_reg_area_pointer");
8398
8399 // Load the saved register area end pointer.
8400 Address __saved_reg_area_end_pointer_p = CGF.Builder.CreateStructGEP(
8401 VAListAddr, 1, "__saved_reg_area_end_pointer_p");
8402 llvm::Value *__saved_reg_area_end_pointer = CGF.Builder.CreateLoad(
8403 __saved_reg_area_end_pointer_p, "__saved_reg_area_end_pointer");
8404
8405 // If the size of argument is > 4 bytes, check if the stack
8406 // location is aligned to 8 bytes
8407 if (ArgAlign > 4) {
8408
8409 llvm::Value *__current_saved_reg_area_pointer_int =
8410 CGF.Builder.CreatePtrToInt(__current_saved_reg_area_pointer,
8411 CGF.Int32Ty);
8412
8413 __current_saved_reg_area_pointer_int = CGF.Builder.CreateAdd(
8414 __current_saved_reg_area_pointer_int,
8415 llvm::ConstantInt::get(CGF.Int32Ty, (ArgAlign - 1)),
8416 "align_current_saved_reg_area_pointer");
8417
8418 __current_saved_reg_area_pointer_int =
8419 CGF.Builder.CreateAnd(__current_saved_reg_area_pointer_int,
8420 llvm::ConstantInt::get(CGF.Int32Ty, -ArgAlign),
8421 "align_current_saved_reg_area_pointer");
8422
8423 __current_saved_reg_area_pointer =
8424 CGF.Builder.CreateIntToPtr(__current_saved_reg_area_pointer_int,
8425 __current_saved_reg_area_pointer->getType(),
8426 "align_current_saved_reg_area_pointer");
8427 }
8428
8429 llvm::Value *__new_saved_reg_area_pointer =
8430 CGF.Builder.CreateGEP(__current_saved_reg_area_pointer,
8431 llvm::ConstantInt::get(CGF.Int32Ty, ArgSize),
8432 "__new_saved_reg_area_pointer");
8433
8434   llvm::Value *UsingStack =
8435       CGF.Builder.CreateICmpSGT(__new_saved_reg_area_pointer,
8436                                 __saved_reg_area_end_pointer);
8437
8438 CGF.Builder.CreateCondBr(UsingStack, OnStackBlock, InRegBlock);
8439
8440 // Argument in saved register area
8441 // Implement the block where argument is in register saved area
8442 CGF.EmitBlock(InRegBlock);
8443
8444 llvm::Type *PTy = CGF.ConvertType(Ty);
8445 llvm::Value *__saved_reg_area_p = CGF.Builder.CreateBitCast(
8446 __current_saved_reg_area_pointer, llvm::PointerType::getUnqual(PTy));
8447
8448 CGF.Builder.CreateStore(__new_saved_reg_area_pointer,
8449 __current_saved_reg_area_pointer_p);
8450
8451 CGF.EmitBranch(ContBlock);
8452
8453 // Argument in overflow area
8454 // Implement the block where the argument is in overflow area.
8455 CGF.EmitBlock(OnStackBlock);
8456
8457 // Load the overflow area pointer
8458 Address __overflow_area_pointer_p =
8459 CGF.Builder.CreateStructGEP(VAListAddr, 2, "__overflow_area_pointer_p");
8460 llvm::Value *__overflow_area_pointer = CGF.Builder.CreateLoad(
8461 __overflow_area_pointer_p, "__overflow_area_pointer");
8462
8463 // Align the overflow area pointer according to the alignment of the argument
8464 if (ArgAlign > 4) {
8465 llvm::Value *__overflow_area_pointer_int =
8466 CGF.Builder.CreatePtrToInt(__overflow_area_pointer, CGF.Int32Ty);
8467
8468 __overflow_area_pointer_int =
8469 CGF.Builder.CreateAdd(__overflow_area_pointer_int,
8470 llvm::ConstantInt::get(CGF.Int32Ty, ArgAlign - 1),
8471 "align_overflow_area_pointer");
8472
8473 __overflow_area_pointer_int =
8474 CGF.Builder.CreateAnd(__overflow_area_pointer_int,
8475 llvm::ConstantInt::get(CGF.Int32Ty, -ArgAlign),
8476 "align_overflow_area_pointer");
8477
8478 __overflow_area_pointer = CGF.Builder.CreateIntToPtr(
8479 __overflow_area_pointer_int, __overflow_area_pointer->getType(),
8480 "align_overflow_area_pointer");
8481 }
8482
8483 // Get the pointer for next argument in overflow area and store it
8484 // to overflow area pointer.
8485 llvm::Value *__new_overflow_area_pointer = CGF.Builder.CreateGEP(
8486 __overflow_area_pointer, llvm::ConstantInt::get(CGF.Int32Ty, ArgSize),
8487 "__overflow_area_pointer.next");
8488
8489 CGF.Builder.CreateStore(__new_overflow_area_pointer,
8490 __overflow_area_pointer_p);
8491
8492 CGF.Builder.CreateStore(__new_overflow_area_pointer,
8493 __current_saved_reg_area_pointer_p);
8494
8495 // Bitcast the overflow area pointer to the type of argument.
8496 llvm::Type *OverflowPTy = CGF.ConvertTypeForMem(Ty);
8497 llvm::Value *__overflow_area_p = CGF.Builder.CreateBitCast(
8498 __overflow_area_pointer, llvm::PointerType::getUnqual(OverflowPTy));
8499
8500 CGF.EmitBranch(ContBlock);
8501
8502 // Get the correct pointer to load the variable argument
8503 // Implement the ContBlock
8504 CGF.EmitBlock(ContBlock);
8505
8506 llvm::Type *MemPTy = llvm::PointerType::getUnqual(CGF.ConvertTypeForMem(Ty));
8507 llvm::PHINode *ArgAddr = CGF.Builder.CreatePHI(MemPTy, 2, "vaarg.addr");
8508 ArgAddr->addIncoming(__saved_reg_area_p, InRegBlock);
8509 ArgAddr->addIncoming(__overflow_area_p, OnStackBlock);
8510
8511 return Address(ArgAddr, CharUnits::fromQuantity(ArgAlign));
8512 }
8513
EmitVAArg(CodeGenFunction & CGF,Address VAListAddr,QualType Ty) const8514 Address HexagonABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
8515 QualType Ty) const {
8516
8517 if (getTarget().getTriple().isMusl())
8518 return EmitVAArgForHexagonLinux(CGF, VAListAddr, Ty);
8519
8520 return EmitVAArgForHexagon(CGF, VAListAddr, Ty);
8521 }

//===----------------------------------------------------------------------===//
// Lanai ABI Implementation
//===----------------------------------------------------------------------===//

namespace {
class LanaiABIInfo : public DefaultABIInfo {
public:
  LanaiABIInfo(CodeGen::CodeGenTypes &CGT) : DefaultABIInfo(CGT) {}

  bool shouldUseInReg(QualType Ty, CCState &State) const;

  void computeInfo(CGFunctionInfo &FI) const override {
    CCState State(FI);
    // Lanai uses 4 registers to pass arguments unless the function has the
    // regparm attribute set.
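    // For example (a sketch; regparm caps the register budget):
    //   void f(int a, int b) __attribute__((regparm(2)));
    // passes both a and b in registers, while regparm(1) would leave b for
    // the stack.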
    if (FI.getHasRegParm()) {
      State.FreeRegs = FI.getRegParm();
    } else {
      State.FreeRegs = 4;
    }

    if (!getCXXABI().classifyReturnType(FI))
      FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
    for (auto &I : FI.arguments())
      I.info = classifyArgumentType(I.type, State);
  }

  ABIArgInfo getIndirectResult(QualType Ty, bool ByVal, CCState &State) const;
  ABIArgInfo classifyArgumentType(QualType Ty, CCState &State) const;
};
} // end anonymous namespace

bool LanaiABIInfo::shouldUseInReg(QualType Ty, CCState &State) const {
  unsigned Size = getContext().getTypeSize(Ty);
  unsigned SizeInRegs = llvm::alignTo(Size, 32U) / 32U;

  if (SizeInRegs == 0)
    return false;

  if (SizeInRegs > State.FreeRegs) {
    State.FreeRegs = 0;
    return false;
  }

  State.FreeRegs -= SizeInRegs;

  return true;
}

ABIArgInfo LanaiABIInfo::getIndirectResult(QualType Ty, bool ByVal,
                                           CCState &State) const {
  if (!ByVal) {
    if (State.FreeRegs) {
      --State.FreeRegs; // Non-byval indirects just use one pointer.
      return getNaturalAlignIndirectInReg(Ty);
    }
    return getNaturalAlignIndirect(Ty, false);
  }

  // Compute the byval alignment.
  const unsigned MinABIStackAlignInBytes = 4;
  unsigned TypeAlign = getContext().getTypeAlign(Ty) / 8;
  return ABIArgInfo::getIndirect(CharUnits::fromQuantity(4), /*ByVal=*/true,
                                 /*Realign=*/TypeAlign >
                                     MinABIStackAlignInBytes);
}

ABIArgInfo LanaiABIInfo::classifyArgumentType(QualType Ty,
                                              CCState &State) const {
  // Check with the C++ ABI first.
  const RecordType *RT = Ty->getAs<RecordType>();
  if (RT) {
    CGCXXABI::RecordArgABI RAA = getRecordArgABI(RT, getCXXABI());
    if (RAA == CGCXXABI::RAA_Indirect) {
      return getIndirectResult(Ty, /*ByVal=*/false, State);
    } else if (RAA == CGCXXABI::RAA_DirectInMemory) {
      return getNaturalAlignIndirect(Ty, /*ByVal=*/true);
    }
  }

  if (isAggregateTypeForABI(Ty)) {
    // Structures with flexible arrays are always indirect.
    if (RT && RT->getDecl()->hasFlexibleArrayMember())
      return getIndirectResult(Ty, /*ByVal=*/true, State);

    // Ignore empty structs/unions.
    if (isEmptyRecord(getContext(), Ty, true))
      return ABIArgInfo::getIgnore();

    llvm::LLVMContext &LLVMContext = getVMContext();
    unsigned SizeInRegs = (getContext().getTypeSize(Ty) + 31) / 32;
    if (SizeInRegs <= State.FreeRegs) {
      llvm::IntegerType *Int32 = llvm::Type::getInt32Ty(LLVMContext);
      SmallVector<llvm::Type *, 3> Elements(SizeInRegs, Int32);
      llvm::Type *Result = llvm::StructType::get(LLVMContext, Elements);
      State.FreeRegs -= SizeInRegs;
      return ABIArgInfo::getDirectInReg(Result);
    } else {
      State.FreeRegs = 0;
    }
    return getIndirectResult(Ty, true, State);
  }

  // Treat an enum type as its underlying type.
  if (const auto *EnumTy = Ty->getAs<EnumType>())
    Ty = EnumTy->getDecl()->getIntegerType();

  bool InReg = shouldUseInReg(Ty, State);

  // Don't pass integers wider than 64 bits in registers.
  if (const auto *EIT = Ty->getAs<ExtIntType>())
    if (EIT->getNumBits() > 64)
      return getIndirectResult(Ty, /*ByVal=*/true, State);

  if (isPromotableIntegerTypeForABI(Ty)) {
    if (InReg)
      return ABIArgInfo::getDirectInReg();
    return ABIArgInfo::getExtend(Ty);
  }
  if (InReg)
    return ABIArgInfo::getDirectInReg();
  return ABIArgInfo::getDirect();
}

namespace {
class LanaiTargetCodeGenInfo : public TargetCodeGenInfo {
public:
  LanaiTargetCodeGenInfo(CodeGen::CodeGenTypes &CGT)
      : TargetCodeGenInfo(std::make_unique<LanaiABIInfo>(CGT)) {}
};
} // end anonymous namespace

//===----------------------------------------------------------------------===//
// AMDGPU ABI Implementation
//===----------------------------------------------------------------------===//

namespace {

class AMDGPUABIInfo final : public DefaultABIInfo {
private:
  static const unsigned MaxNumRegsForArgsRet = 16;

  unsigned numRegsForType(QualType Ty) const;

  bool isHomogeneousAggregateBaseType(QualType Ty) const override;
  bool isHomogeneousAggregateSmallEnough(const Type *Base,
                                         uint64_t Members) const override;

  // Coerce HIP scalar pointer arguments from generic pointers to global ones.
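  // For instance (a sketch of the intended effect): a HIP kernel parameter of
  // IR type i32* in the generic address space becomes i32 addrspace(1)*, so
  // accesses through it can use global-memory instructions directly.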
  llvm::Type *coerceKernelArgumentType(llvm::Type *Ty, unsigned FromAS,
                                       unsigned ToAS) const {
    // Single value types.
    if (Ty->isPointerTy() && Ty->getPointerAddressSpace() == FromAS)
      return llvm::PointerType::get(
          cast<llvm::PointerType>(Ty)->getElementType(), ToAS);
    return Ty;
  }

public:
  explicit AMDGPUABIInfo(CodeGen::CodeGenTypes &CGT) :
    DefaultABIInfo(CGT) {}

  ABIArgInfo classifyReturnType(QualType RetTy) const;
  ABIArgInfo classifyKernelArgumentType(QualType Ty) const;
  ABIArgInfo classifyArgumentType(QualType Ty, unsigned &NumRegsLeft) const;

  void computeInfo(CGFunctionInfo &FI) const override;
  Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
                    QualType Ty) const override;
};

bool AMDGPUABIInfo::isHomogeneousAggregateBaseType(QualType Ty) const {
  return true;
}

bool AMDGPUABIInfo::isHomogeneousAggregateSmallEnough(
    const Type *Base, uint64_t Members) const {
  uint32_t NumRegs = (getContext().getTypeSize(Base) + 31) / 32;

  // Homogeneous Aggregates may occupy at most 16 registers.
  return Members * NumRegs <= MaxNumRegsForArgsRet;
}

/// Estimate the number of registers the type will use when passed in
/// registers.
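/// For example (a sketch): <4 x half> packs into 2 registers, while
/// <3 x float> uses 3 registers even though its in-memory size is that of
/// four floats.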
unsigned AMDGPUABIInfo::numRegsForType(QualType Ty) const {
  unsigned NumRegs = 0;

  if (const VectorType *VT = Ty->getAs<VectorType>()) {
    // Compute from the number of elements. The reported size is based on the
    // in-memory size, which includes the padding 4th element for 3-vectors.
    QualType EltTy = VT->getElementType();
    unsigned EltSize = getContext().getTypeSize(EltTy);

    // 16-bit element vectors should be passed packed.
    if (EltSize == 16)
      return (VT->getNumElements() + 1) / 2;

    unsigned EltNumRegs = (EltSize + 31) / 32;
    return EltNumRegs * VT->getNumElements();
  }

  if (const RecordType *RT = Ty->getAs<RecordType>()) {
    const RecordDecl *RD = RT->getDecl();
    assert(!RD->hasFlexibleArrayMember());

    for (const FieldDecl *Field : RD->fields()) {
      QualType FieldTy = Field->getType();
      NumRegs += numRegsForType(FieldTy);
    }

    return NumRegs;
  }

  return (getContext().getTypeSize(Ty) + 31) / 32;
}

void AMDGPUABIInfo::computeInfo(CGFunctionInfo &FI) const {
  llvm::CallingConv::ID CC = FI.getCallingConvention();

  if (!getCXXABI().classifyReturnType(FI))
    FI.getReturnInfo() = classifyReturnType(FI.getReturnType());

  unsigned NumRegsLeft = MaxNumRegsForArgsRet;
  for (auto &Arg : FI.arguments()) {
    if (CC == llvm::CallingConv::AMDGPU_KERNEL) {
      Arg.info = classifyKernelArgumentType(Arg.type);
    } else {
      Arg.info = classifyArgumentType(Arg.type, NumRegsLeft);
    }
  }
}

Address AMDGPUABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
                                 QualType Ty) const {
  llvm_unreachable("AMDGPU does not support varargs");
}

ABIArgInfo AMDGPUABIInfo::classifyReturnType(QualType RetTy) const {
  if (isAggregateTypeForABI(RetTy)) {
    // Records with non-trivial destructors/copy-constructors should not be
    // returned by value.
    if (!getRecordArgABI(RetTy, getCXXABI())) {
      // Ignore empty structs/unions.
      if (isEmptyRecord(getContext(), RetTy, true))
        return ABIArgInfo::getIgnore();

      // Lower single-element structs to just return a regular value.
      if (const Type *SeltTy = isSingleElementStruct(RetTy, getContext()))
        return ABIArgInfo::getDirect(CGT.ConvertType(QualType(SeltTy, 0)));

      if (const RecordType *RT = RetTy->getAs<RecordType>()) {
        const RecordDecl *RD = RT->getDecl();
        if (RD->hasFlexibleArrayMember())
          return DefaultABIInfo::classifyReturnType(RetTy);
      }

      // Pack aggregates <= 8 bytes into a single VGPR or pair.
      uint64_t Size = getContext().getTypeSize(RetTy);
      if (Size <= 16)
        return ABIArgInfo::getDirect(llvm::Type::getInt16Ty(getVMContext()));

      if (Size <= 32)
        return ABIArgInfo::getDirect(llvm::Type::getInt32Ty(getVMContext()));

      if (Size <= 64) {
        llvm::Type *I32Ty = llvm::Type::getInt32Ty(getVMContext());
        return ABIArgInfo::getDirect(llvm::ArrayType::get(I32Ty, 2));
      }

      if (numRegsForType(RetTy) <= MaxNumRegsForArgsRet)
        return ABIArgInfo::getDirect();
    }
  }

  // Otherwise just do the default thing.
  return DefaultABIInfo::classifyReturnType(RetTy);
}

/// For kernels all parameters are really passed in a special buffer. It
/// doesn't make sense to pass anything byval, so everything must be direct.
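/// For example (a sketch): a HIP kernel taking 'struct { int a, b; }' receives
/// it byref as a pointer into the kernel argument (constant) segment rather
/// than as a byval stack copy.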
ABIArgInfo AMDGPUABIInfo::classifyKernelArgumentType(QualType Ty) const {
  Ty = useFirstFieldIfTransparentUnion(Ty);

  // TODO: Can we omit empty structs?

  if (const Type *SeltTy = isSingleElementStruct(Ty, getContext()))
    Ty = QualType(SeltTy, 0);

  llvm::Type *OrigLTy = CGT.ConvertType(Ty);
  llvm::Type *LTy = OrigLTy;
  if (getContext().getLangOpts().HIP) {
    LTy = coerceKernelArgumentType(
        OrigLTy, /*FromAS=*/getContext().getTargetAddressSpace(LangAS::Default),
        /*ToAS=*/getContext().getTargetAddressSpace(LangAS::cuda_device));
  }

  // FIXME: Should also use this for OpenCL, but it requires addressing the
  // problem of kernels being called.
  //
  // FIXME: This doesn't apply the optimization of coercing pointers in structs
  // to global address space when using byref. This would require implementing
  // a new kind of coercion of the in-memory type for indirect arguments.
  if (!getContext().getLangOpts().OpenCL && LTy == OrigLTy &&
      isAggregateTypeForABI(Ty)) {
    return ABIArgInfo::getIndirectAliased(
        getContext().getTypeAlignInChars(Ty),
        getContext().getTargetAddressSpace(LangAS::opencl_constant),
        false /*Realign*/, nullptr /*Padding*/);
  }

  // If we set CanBeFlattened to true, CodeGen will expand the struct to its
  // individual elements, which confuses the Clover OpenCL backend; therefore
  // we have to set it to false here. Other args of getDirect() are just
  // defaults.
  return ABIArgInfo::getDirect(LTy, 0, nullptr, false);
}

ABIArgInfo AMDGPUABIInfo::classifyArgumentType(QualType Ty,
                                               unsigned &NumRegsLeft) const {
  assert(NumRegsLeft <= MaxNumRegsForArgsRet && "register estimate underflow");

  Ty = useFirstFieldIfTransparentUnion(Ty);

  if (isAggregateTypeForABI(Ty)) {
    // Records with non-trivial destructors/copy-constructors should not be
    // passed by value.
    if (auto RAA = getRecordArgABI(Ty, getCXXABI()))
      return getNaturalAlignIndirect(Ty, RAA == CGCXXABI::RAA_DirectInMemory);

    // Ignore empty structs/unions.
    if (isEmptyRecord(getContext(), Ty, true))
      return ABIArgInfo::getIgnore();

    // Lower single-element structs to just pass a regular value. TODO: We
    // could do reasonable-size multiple-element structs too, using
    // getExpand(), though watch out for things like bitfields.
    if (const Type *SeltTy = isSingleElementStruct(Ty, getContext()))
      return ABIArgInfo::getDirect(CGT.ConvertType(QualType(SeltTy, 0)));

    if (const RecordType *RT = Ty->getAs<RecordType>()) {
      const RecordDecl *RD = RT->getDecl();
      if (RD->hasFlexibleArrayMember())
        return DefaultABIInfo::classifyArgumentType(Ty);
    }

    // Pack aggregates <= 8 bytes into a single VGPR or pair.
    uint64_t Size = getContext().getTypeSize(Ty);
    if (Size <= 64) {
      unsigned NumRegs = (Size + 31) / 32;
      NumRegsLeft -= std::min(NumRegsLeft, NumRegs);

      if (Size <= 16)
        return ABIArgInfo::getDirect(llvm::Type::getInt16Ty(getVMContext()));

      if (Size <= 32)
        return ABIArgInfo::getDirect(llvm::Type::getInt32Ty(getVMContext()));

      // XXX: Should this be i64 instead, and should the limit increase?
      llvm::Type *I32Ty = llvm::Type::getInt32Ty(getVMContext());
      return ABIArgInfo::getDirect(llvm::ArrayType::get(I32Ty, 2));
    }
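
    // For example (a sketch): 'struct { char c[3]; }' (24 bits) is coerced to
    // i32, while 'struct { int a; short b; }' (64 bits after padding) becomes
    // [2 x i32].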
    if (NumRegsLeft > 0) {
      unsigned NumRegs = numRegsForType(Ty);
      if (NumRegsLeft >= NumRegs) {
        NumRegsLeft -= NumRegs;
        return ABIArgInfo::getDirect();
      }
    }
  }

  // Otherwise just do the default thing.
  ABIArgInfo ArgInfo = DefaultABIInfo::classifyArgumentType(Ty);
  if (!ArgInfo.isIndirect()) {
    unsigned NumRegs = numRegsForType(Ty);
    NumRegsLeft -= std::min(NumRegs, NumRegsLeft);
  }

  return ArgInfo;
}

class AMDGPUTargetCodeGenInfo : public TargetCodeGenInfo {
public:
  AMDGPUTargetCodeGenInfo(CodeGenTypes &CGT)
      : TargetCodeGenInfo(std::make_unique<AMDGPUABIInfo>(CGT)) {}
  void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
                           CodeGen::CodeGenModule &M) const override;
  unsigned getOpenCLKernelCallingConv() const override;

  llvm::Constant *getNullPointer(const CodeGen::CodeGenModule &CGM,
                                 llvm::PointerType *T,
                                 QualType QT) const override;

  LangAS getASTAllocaAddressSpace() const override {
    return getLangASFromTargetAS(
        getABIInfo().getDataLayout().getAllocaAddrSpace());
  }
  LangAS getGlobalVarAddressSpace(CodeGenModule &CGM,
                                  const VarDecl *D) const override;
  llvm::SyncScope::ID getLLVMSyncScopeID(const LangOptions &LangOpts,
                                         SyncScope Scope,
                                         llvm::AtomicOrdering Ordering,
                                         llvm::LLVMContext &Ctx) const override;
  llvm::Function *
  createEnqueuedBlockKernel(CodeGenFunction &CGF,
                            llvm::Function *BlockInvokeFunc,
                            llvm::Value *BlockLiteral) const override;
  bool shouldEmitStaticExternCAliases() const override;
  void setCUDAKernelCallingConvention(const FunctionType *&FT) const override;
};
} // end anonymous namespace

static bool requiresAMDGPUProtectedVisibility(const Decl *D,
                                              llvm::GlobalValue *GV) {
  if (GV->getVisibility() != llvm::GlobalValue::HiddenVisibility)
    return false;

  return D->hasAttr<OpenCLKernelAttr>() ||
         (isa<FunctionDecl>(D) && D->hasAttr<CUDAGlobalAttr>()) ||
         (isa<VarDecl>(D) &&
          (D->hasAttr<CUDADeviceAttr>() || D->hasAttr<CUDAConstantAttr>() ||
           cast<VarDecl>(D)->getType()->isCUDADeviceBuiltinSurfaceType() ||
           cast<VarDecl>(D)->getType()->isCUDADeviceBuiltinTextureType()));
}

void AMDGPUTargetCodeGenInfo::setTargetAttributes(
    const Decl *D, llvm::GlobalValue *GV, CodeGen::CodeGenModule &M) const {
  if (requiresAMDGPUProtectedVisibility(D, GV)) {
    GV->setVisibility(llvm::GlobalValue::ProtectedVisibility);
    GV->setDSOLocal(true);
  }

  if (GV->isDeclaration())
    return;
  const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D);
  if (!FD)
    return;

  llvm::Function *F = cast<llvm::Function>(GV);

  const auto *ReqdWGS = M.getLangOpts().OpenCL ?
      FD->getAttr<ReqdWorkGroupSizeAttr>() : nullptr;

  const bool IsOpenCLKernel = M.getLangOpts().OpenCL &&
                              FD->hasAttr<OpenCLKernelAttr>();
  const bool IsHIPKernel = M.getLangOpts().HIP &&
                           FD->hasAttr<CUDAGlobalAttr>();
  if ((IsOpenCLKernel || IsHIPKernel) &&
      (M.getTriple().getOS() == llvm::Triple::AMDHSA))
    F->addFnAttr("amdgpu-implicitarg-num-bytes", "56");

  if (IsHIPKernel)
    F->addFnAttr("uniform-work-group-size", "true");

  const auto *FlatWGS = FD->getAttr<AMDGPUFlatWorkGroupSizeAttr>();
  if (ReqdWGS || FlatWGS) {
    unsigned Min = 0;
    unsigned Max = 0;
    if (FlatWGS) {
      Min = FlatWGS->getMin()
                ->EvaluateKnownConstInt(M.getContext())
                .getExtValue();
      Max = FlatWGS->getMax()
                ->EvaluateKnownConstInt(M.getContext())
                .getExtValue();
    }
    if (ReqdWGS && Min == 0 && Max == 0)
      Min = Max = ReqdWGS->getXDim() * ReqdWGS->getYDim() * ReqdWGS->getZDim();

    if (Min != 0) {
      assert(Min <= Max && "Min must be less than or equal to Max");

      std::string AttrVal = llvm::utostr(Min) + "," + llvm::utostr(Max);
      F->addFnAttr("amdgpu-flat-work-group-size", AttrVal);
    } else
      assert(Max == 0 && "Max must be zero");
  } else if (IsOpenCLKernel || IsHIPKernel) {
    // By default, restrict the maximum size to a value specified by
    // --gpu-max-threads-per-block=n or its default value.
    std::string AttrVal =
        std::string("1,") + llvm::utostr(M.getLangOpts().GPUMaxThreadsPerBlock);
    F->addFnAttr("amdgpu-flat-work-group-size", AttrVal);
  }

  if (const auto *Attr = FD->getAttr<AMDGPUWavesPerEUAttr>()) {
    unsigned Min =
        Attr->getMin()->EvaluateKnownConstInt(M.getContext()).getExtValue();
    unsigned Max = Attr->getMax() ? Attr->getMax()
                                        ->EvaluateKnownConstInt(M.getContext())
                                        .getExtValue()
                                  : 0;

    if (Min != 0) {
      assert((Max == 0 || Min <= Max) &&
             "Min must be less than or equal to Max");

      std::string AttrVal = llvm::utostr(Min);
      if (Max != 0)
        AttrVal = AttrVal + "," + llvm::utostr(Max);
      F->addFnAttr("amdgpu-waves-per-eu", AttrVal);
    } else
      assert(Max == 0 && "Max must be zero");
  }

  if (const auto *Attr = FD->getAttr<AMDGPUNumSGPRAttr>()) {
    unsigned NumSGPR = Attr->getNumSGPR();

    if (NumSGPR != 0)
      F->addFnAttr("amdgpu-num-sgpr", llvm::utostr(NumSGPR));
  }

  if (const auto *Attr = FD->getAttr<AMDGPUNumVGPRAttr>()) {
    uint32_t NumVGPR = Attr->getNumVGPR();

    if (NumVGPR != 0)
      F->addFnAttr("amdgpu-num-vgpr", llvm::utostr(NumVGPR));
  }

  if (M.getContext().getTargetInfo().allowAMDGPUUnsafeFPAtomics())
    F->addFnAttr("amdgpu-unsafe-fp-atomics", "true");
}

unsigned AMDGPUTargetCodeGenInfo::getOpenCLKernelCallingConv() const {
  return llvm::CallingConv::AMDGPU_KERNEL;
}

// Currently LLVM assumes null pointers always have value 0, which results in
// incorrectly transformed IR. Therefore, instead of emitting null pointers in
// the private and local address spaces, a null pointer in the generic address
// space is emitted and then cast to a pointer in the local or private address
// space.
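//
// Roughly (a sketch of the emitted constant, assuming the usual amdgcn
// address-space numbering where local memory is addrspace(3)):
//
//   addrspacecast (i8* null to i8 addrspace(3)*)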
llvm::Constant *AMDGPUTargetCodeGenInfo::getNullPointer(
    const CodeGen::CodeGenModule &CGM, llvm::PointerType *PT,
    QualType QT) const {
  if (CGM.getContext().getTargetNullPointerValue(QT) == 0)
    return llvm::ConstantPointerNull::get(PT);

  auto &Ctx = CGM.getContext();
  auto NPT = llvm::PointerType::get(PT->getElementType(),
      Ctx.getTargetAddressSpace(LangAS::opencl_generic));
  return llvm::ConstantExpr::getAddrSpaceCast(
      llvm::ConstantPointerNull::get(NPT), PT);
}

LangAS
AMDGPUTargetCodeGenInfo::getGlobalVarAddressSpace(CodeGenModule &CGM,
                                                  const VarDecl *D) const {
  assert(!CGM.getLangOpts().OpenCL &&
         !(CGM.getLangOpts().CUDA && CGM.getLangOpts().CUDAIsDevice) &&
         "Address space agnostic languages only");
  LangAS DefaultGlobalAS = getLangASFromTargetAS(
      CGM.getContext().getTargetAddressSpace(LangAS::opencl_global));
  if (!D)
    return DefaultGlobalAS;

  LangAS AddrSpace = D->getType().getAddressSpace();
  assert(AddrSpace == LangAS::Default || isTargetAddressSpace(AddrSpace));
  if (AddrSpace != LangAS::Default)
    return AddrSpace;

  if (CGM.isTypeConstant(D->getType(), false)) {
    if (auto ConstAS = CGM.getTarget().getConstantAddressSpace())
      return ConstAS.getValue();
  }
  return DefaultGlobalAS;
}

llvm::SyncScope::ID
AMDGPUTargetCodeGenInfo::getLLVMSyncScopeID(const LangOptions &LangOpts,
                                            SyncScope Scope,
                                            llvm::AtomicOrdering Ordering,
                                            llvm::LLVMContext &Ctx) const {
  std::string Name;
  switch (Scope) {
  case SyncScope::OpenCLWorkGroup:
    Name = "workgroup";
    break;
  case SyncScope::OpenCLDevice:
    Name = "agent";
    break;
  case SyncScope::OpenCLAllSVMDevices:
    Name = "";
    break;
  case SyncScope::OpenCLSubGroup:
    Name = "wavefront";
  }

  if (Ordering != llvm::AtomicOrdering::SequentiallyConsistent) {
    if (!Name.empty())
      Name += "-";
    Name += "one-as";
  }

  return Ctx.getOrInsertSyncScopeID(Name);
}
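
// For example (a sketch): a relaxed OpenCL work-group-scope atomic maps to
// the "workgroup-one-as" sync scope, while a seq_cst one maps to plain
// "workgroup".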

bool AMDGPUTargetCodeGenInfo::shouldEmitStaticExternCAliases() const {
  return false;
}

void AMDGPUTargetCodeGenInfo::setCUDAKernelCallingConvention(
    const FunctionType *&FT) const {
  FT = getABIInfo().getContext().adjustFunctionType(
      FT, FT->getExtInfo().withCallingConv(CC_OpenCLKernel));
}

//===----------------------------------------------------------------------===//
// SPARC v8 ABI Implementation.
// Based on the SPARC Compliance Definition version 2.4.1.
//
// Ensures that complex values are passed in registers.
//
namespace {
class SparcV8ABIInfo : public DefaultABIInfo {
public:
  SparcV8ABIInfo(CodeGenTypes &CGT) : DefaultABIInfo(CGT) {}

private:
  ABIArgInfo classifyReturnType(QualType RetTy) const;
  void computeInfo(CGFunctionInfo &FI) const override;
};
} // end anonymous namespace

ABIArgInfo
SparcV8ABIInfo::classifyReturnType(QualType Ty) const {
  if (Ty->isAnyComplexType())
    return ABIArgInfo::getDirect();

  return DefaultABIInfo::classifyReturnType(Ty);
}

void SparcV8ABIInfo::computeInfo(CGFunctionInfo &FI) const {
  FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
  for (auto &Arg : FI.arguments())
    Arg.info = classifyArgumentType(Arg.type);
}

namespace {
class SparcV8TargetCodeGenInfo : public TargetCodeGenInfo {
public:
  SparcV8TargetCodeGenInfo(CodeGenTypes &CGT)
      : TargetCodeGenInfo(std::make_unique<SparcV8ABIInfo>(CGT)) {}
};
} // end anonymous namespace

//===----------------------------------------------------------------------===//
// SPARC v9 ABI Implementation.
// Based on the SPARC Compliance Definition version 2.4.1.
//
// Function arguments are mapped to a nominal "parameter array" and promoted to
// registers depending on their type. Each argument occupies 8 or 16 bytes in
// the array; structs larger than 16 bytes are passed indirectly.
//
// One case requires special care:
//
// struct mixed {
//   int i;
//   float f;
// };
//
// When a struct mixed is passed by value, it only occupies 8 bytes in the
// parameter array, but the int is passed in an integer register, and the float
// is passed in a floating point register. This is represented as two arguments
// with the LLVM IR inreg attribute:
//
// declare void f(i32 inreg %i, float inreg %f)
//
// The code generator will only allocate 4 bytes from the parameter array for
// the inreg arguments. All other arguments are allocated a multiple of 8
// bytes.
//
namespace {
class SparcV9ABIInfo : public ABIInfo {
public:
  SparcV9ABIInfo(CodeGenTypes &CGT) : ABIInfo(CGT) {}

private:
  ABIArgInfo classifyType(QualType RetTy, unsigned SizeLimit) const;
  void computeInfo(CGFunctionInfo &FI) const override;
  Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
                    QualType Ty) const override;

  // Coercion type builder for structs passed in registers. The coercion type
  // serves two purposes:
  //
  // 1. Pad structs to a multiple of 64 bits, so they are passed 'left-aligned'
  //    in registers.
  // 2. Expose aligned floating point elements as first-level elements, so the
  //    code generator knows to pass them in floating point registers.
  //
  // We also compute the InReg flag which indicates that the struct contains
  // aligned 32-bit floats.
  //
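  // For example (a sketch): 'struct { float f; double d; }' builds the
  // coercion type { float, i32, double } with InReg set; the i32 pads the
  // float out to a full 64-bit word before the aligned double is appended.
  //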
  struct CoerceBuilder {
    llvm::LLVMContext &Context;
    const llvm::DataLayout &DL;
    SmallVector<llvm::Type*, 8> Elems;
    uint64_t Size;
    bool InReg;

    CoerceBuilder(llvm::LLVMContext &c, const llvm::DataLayout &dl)
      : Context(c), DL(dl), Size(0), InReg(false) {}

    // Pad Elems with integers until Size is ToSize.
    void pad(uint64_t ToSize) {
      assert(ToSize >= Size && "Cannot remove elements");
      if (ToSize == Size)
        return;

      // Finish the current 64-bit word.
      uint64_t Aligned = llvm::alignTo(Size, 64);
      if (Aligned > Size && Aligned <= ToSize) {
        Elems.push_back(llvm::IntegerType::get(Context, Aligned - Size));
        Size = Aligned;
      }

      // Add whole 64-bit words.
      while (Size + 64 <= ToSize) {
        Elems.push_back(llvm::Type::getInt64Ty(Context));
        Size += 64;
      }

      // Final in-word padding.
      if (Size < ToSize) {
        Elems.push_back(llvm::IntegerType::get(Context, ToSize - Size));
        Size = ToSize;
      }
    }

    // Add a floating point element at Offset.
    void addFloat(uint64_t Offset, llvm::Type *Ty, unsigned Bits) {
      // Unaligned floats are treated as integers.
      if (Offset % Bits)
        return;
      // The InReg flag is only required if there are any floats < 64 bits.
      if (Bits < 64)
        InReg = true;
      pad(Offset);
      Elems.push_back(Ty);
      Size = Offset + Bits;
    }

    // Add a struct type to the coercion type, starting at Offset (in bits).
    void addStruct(uint64_t Offset, llvm::StructType *StrTy) {
      const llvm::StructLayout *Layout = DL.getStructLayout(StrTy);
      for (unsigned i = 0, e = StrTy->getNumElements(); i != e; ++i) {
        llvm::Type *ElemTy = StrTy->getElementType(i);
        uint64_t ElemOffset = Offset + Layout->getElementOffsetInBits(i);
        switch (ElemTy->getTypeID()) {
        case llvm::Type::StructTyID:
          addStruct(ElemOffset, cast<llvm::StructType>(ElemTy));
          break;
        case llvm::Type::FloatTyID:
          addFloat(ElemOffset, ElemTy, 32);
          break;
        case llvm::Type::DoubleTyID:
          addFloat(ElemOffset, ElemTy, 64);
          break;
        case llvm::Type::FP128TyID:
          addFloat(ElemOffset, ElemTy, 128);
          break;
        case llvm::Type::PointerTyID:
          if (ElemOffset % 64 == 0) {
            pad(ElemOffset);
            Elems.push_back(ElemTy);
            Size += 64;
          }
          break;
        default:
          break;
        }
      }
    }

    // Check if Ty is a usable substitute for the coercion type.
    bool isUsableType(llvm::StructType *Ty) const {
      return llvm::makeArrayRef(Elems) == Ty->elements();
    }

    // Get the coercion type as a literal struct type.
    llvm::Type *getType() const {
      if (Elems.size() == 1)
        return Elems.front();
      return llvm::StructType::get(Context, Elems);
    }
  };
};
} // end anonymous namespace

ABIArgInfo
SparcV9ABIInfo::classifyType(QualType Ty, unsigned SizeLimit) const {
  if (Ty->isVoidType())
    return ABIArgInfo::getIgnore();

  uint64_t Size = getContext().getTypeSize(Ty);

  // Anything too big to fit in registers is passed with an explicit indirect
  // pointer / sret pointer.
  if (Size > SizeLimit)
    return getNaturalAlignIndirect(Ty, /*ByVal=*/false);

  // Treat an enum type as its underlying type.
  if (const EnumType *EnumTy = Ty->getAs<EnumType>())
    Ty = EnumTy->getDecl()->getIntegerType();

  // Integer types smaller than a register are extended.
  if (Size < 64 && Ty->isIntegerType())
    return ABIArgInfo::getExtend(Ty);

  if (const auto *EIT = Ty->getAs<ExtIntType>())
    if (EIT->getNumBits() < 64)
      return ABIArgInfo::getExtend(Ty);

  // Other non-aggregates go in registers.
  if (!isAggregateTypeForABI(Ty))
    return ABIArgInfo::getDirect();

  // If a C++ object has either a non-trivial copy constructor or a non-trivial
  // destructor, it is passed with an explicit indirect pointer / sret pointer.
  if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, getCXXABI()))
    return getNaturalAlignIndirect(Ty, RAA == CGCXXABI::RAA_DirectInMemory);

  // This is a small aggregate type that should be passed in registers.
  // Build a coercion type from the LLVM struct type.
  llvm::StructType *StrTy = dyn_cast<llvm::StructType>(CGT.ConvertType(Ty));
  if (!StrTy)
    return ABIArgInfo::getDirect();

  CoerceBuilder CB(getVMContext(), getDataLayout());
  CB.addStruct(0, StrTy);
  CB.pad(llvm::alignTo(CB.DL.getTypeSizeInBits(StrTy), 64));

  // Try to use the original type for coercion.
  llvm::Type *CoerceTy = CB.isUsableType(StrTy) ? StrTy : CB.getType();

  if (CB.InReg)
    return ABIArgInfo::getDirectInReg(CoerceTy);
  return ABIArgInfo::getDirect(CoerceTy);
}

Address SparcV9ABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
                                  QualType Ty) const {
  ABIArgInfo AI = classifyType(Ty, 16 * 8);
  llvm::Type *ArgTy = CGT.ConvertType(Ty);
  if (AI.canHaveCoerceToType() && !AI.getCoerceToType())
    AI.setCoerceToType(ArgTy);

  CharUnits SlotSize = CharUnits::fromQuantity(8);

  CGBuilderTy &Builder = CGF.Builder;
  Address Addr(Builder.CreateLoad(VAListAddr, "ap.cur"), SlotSize);
  llvm::Type *ArgPtrTy = llvm::PointerType::getUnqual(ArgTy);

  auto TypeInfo = getContext().getTypeInfoInChars(Ty);

  Address ArgAddr = Address::invalid();
  CharUnits Stride;
  switch (AI.getKind()) {
  case ABIArgInfo::Expand:
  case ABIArgInfo::CoerceAndExpand:
  case ABIArgInfo::InAlloca:
    llvm_unreachable("Unsupported ABI kind for va_arg");

  case ABIArgInfo::Extend: {
    Stride = SlotSize;
    CharUnits Offset = SlotSize - TypeInfo.Width;
    ArgAddr = Builder.CreateConstInBoundsByteGEP(Addr, Offset, "extend");
    break;
  }
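
  // Extended values are right-justified in their 8-byte slot; for example
  // (a sketch), a 4-byte int sits at ap.cur + 4 on this big-endian target.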

  case ABIArgInfo::Direct: {
    auto AllocSize = getDataLayout().getTypeAllocSize(AI.getCoerceToType());
    Stride = CharUnits::fromQuantity(AllocSize).alignTo(SlotSize);
    ArgAddr = Addr;
    break;
  }

  case ABIArgInfo::Indirect:
  case ABIArgInfo::IndirectAliased:
    Stride = SlotSize;
    ArgAddr = Builder.CreateElementBitCast(Addr, ArgPtrTy, "indirect");
    ArgAddr = Address(Builder.CreateLoad(ArgAddr, "indirect.arg"),
                      TypeInfo.Align);
    break;

  case ABIArgInfo::Ignore:
    return Address(llvm::UndefValue::get(ArgPtrTy), TypeInfo.Align);
  }

  // Update VAList.
  Address NextPtr = Builder.CreateConstInBoundsByteGEP(Addr, Stride, "ap.next");
  Builder.CreateStore(NextPtr.getPointer(), VAListAddr);

  return Builder.CreateBitCast(ArgAddr, ArgPtrTy, "arg.addr");
}

void SparcV9ABIInfo::computeInfo(CGFunctionInfo &FI) const {
  FI.getReturnInfo() = classifyType(FI.getReturnType(), 32 * 8);
  for (auto &I : FI.arguments())
    I.info = classifyType(I.type, 16 * 8);
}

namespace {
class SparcV9TargetCodeGenInfo : public TargetCodeGenInfo {
public:
  SparcV9TargetCodeGenInfo(CodeGenTypes &CGT)
      : TargetCodeGenInfo(std::make_unique<SparcV9ABIInfo>(CGT)) {}

  int getDwarfEHStackPointer(CodeGen::CodeGenModule &M) const override {
    return 14;
  }

  bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
                               llvm::Value *Address) const override;
};
} // end anonymous namespace

bool
SparcV9TargetCodeGenInfo::initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
                                                  llvm::Value *Address) const {
  // This is calculated from the LLVM and GCC tables and verified
  // against gcc output. AFAIK all ABIs use the same encoding.

  CodeGen::CGBuilderTy &Builder = CGF.Builder;

  llvm::IntegerType *i8 = CGF.Int8Ty;
  llvm::Value *Four8 = llvm::ConstantInt::get(i8, 4);
  llvm::Value *Eight8 = llvm::ConstantInt::get(i8, 8);

  // 0-31: the 8-byte general-purpose registers
  AssignToArrayRange(Builder, Address, Eight8, 0, 31);

  // 32-63: f0-31, the 4-byte floating-point registers
  AssignToArrayRange(Builder, Address, Four8, 32, 63);

  // Y   = 64
  // PSR = 65
  // WIM = 66
  // TBR = 67
  // PC  = 68
  // NPC = 69
  // FSR = 70
  // CSR = 71
  AssignToArrayRange(Builder, Address, Eight8, 64, 71);

  // 72-87: d0-15, the 8-byte floating-point registers
  AssignToArrayRange(Builder, Address, Eight8, 72, 87);

  return false;
}

//===----------------------------------------------------------------------===//
// ARC ABI Implementation
//===----------------------------------------------------------------------===//

namespace {

class ARCABIInfo : public DefaultABIInfo {
public:
  using DefaultABIInfo::DefaultABIInfo;

private:
  Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
                    QualType Ty) const override;

  void updateState(const ABIArgInfo &Info, QualType Ty, CCState &State) const {
    if (!State.FreeRegs)
      return;
    if (Info.isIndirect() && Info.getInReg())
      State.FreeRegs--;
    else if (Info.isDirect() && Info.getInReg()) {
      unsigned sz = (getContext().getTypeSize(Ty) + 31) / 32;
      if (sz < State.FreeRegs)
        State.FreeRegs -= sz;
      else
        State.FreeRegs = 0;
    }
  }

  void computeInfo(CGFunctionInfo &FI) const override {
    CCState State(FI);
    // ARC uses 8 registers to pass arguments.
    State.FreeRegs = 8;

    if (!getCXXABI().classifyReturnType(FI))
      FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
    updateState(FI.getReturnInfo(), FI.getReturnType(), State);
    for (auto &I : FI.arguments()) {
      I.info = classifyArgumentType(I.type, State.FreeRegs);
      updateState(I.info, I.type, State);
    }
  }

  ABIArgInfo getIndirectByRef(QualType Ty, bool HasFreeRegs) const;
  ABIArgInfo getIndirectByValue(QualType Ty) const;
  ABIArgInfo classifyArgumentType(QualType Ty, uint8_t FreeRegs) const;
  ABIArgInfo classifyReturnType(QualType RetTy) const;
};

class ARCTargetCodeGenInfo : public TargetCodeGenInfo {
public:
  ARCTargetCodeGenInfo(CodeGenTypes &CGT)
      : TargetCodeGenInfo(std::make_unique<ARCABIInfo>(CGT)) {}
};

ABIArgInfo ARCABIInfo::getIndirectByRef(QualType Ty, bool HasFreeRegs) const {
  return HasFreeRegs ? getNaturalAlignIndirectInReg(Ty) :
                       getNaturalAlignIndirect(Ty, false);
}

ABIArgInfo ARCABIInfo::getIndirectByValue(QualType Ty) const {
  // Compute the byval alignment.
  const unsigned MinABIStackAlignInBytes = 4;
  unsigned TypeAlign = getContext().getTypeAlign(Ty) / 8;
  return ABIArgInfo::getIndirect(CharUnits::fromQuantity(4), /*ByVal=*/true,
                                 TypeAlign > MinABIStackAlignInBytes);
}

Address ARCABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
                              QualType Ty) const {
  return emitVoidPtrVAArg(CGF, VAListAddr, Ty, /*indirect*/ false,
                          getContext().getTypeInfoInChars(Ty),
                          CharUnits::fromQuantity(4), true);
}

ABIArgInfo ARCABIInfo::classifyArgumentType(QualType Ty,
                                            uint8_t FreeRegs) const {
  // Handle the generic C++ ABI.
  const RecordType *RT = Ty->getAs<RecordType>();
  if (RT) {
    CGCXXABI::RecordArgABI RAA = getRecordArgABI(RT, getCXXABI());
    if (RAA == CGCXXABI::RAA_Indirect)
      return getIndirectByRef(Ty, FreeRegs > 0);

    if (RAA == CGCXXABI::RAA_DirectInMemory)
      return getIndirectByValue(Ty);
  }

  // Treat an enum type as its underlying type.
  if (const EnumType *EnumTy = Ty->getAs<EnumType>())
    Ty = EnumTy->getDecl()->getIntegerType();

  auto SizeInRegs = llvm::alignTo(getContext().getTypeSize(Ty), 32) / 32;

  if (isAggregateTypeForABI(Ty)) {
    // Structures with flexible arrays are always indirect.
    if (RT && RT->getDecl()->hasFlexibleArrayMember())
      return getIndirectByValue(Ty);

    // Ignore empty structs/unions.
    if (isEmptyRecord(getContext(), Ty, true))
      return ABIArgInfo::getIgnore();

    llvm::LLVMContext &LLVMContext = getVMContext();

    llvm::IntegerType *Int32 = llvm::Type::getInt32Ty(LLVMContext);
    SmallVector<llvm::Type *, 3> Elements(SizeInRegs, Int32);
    llvm::Type *Result = llvm::StructType::get(LLVMContext, Elements);

    return FreeRegs >= SizeInRegs ?
               ABIArgInfo::getDirectInReg(Result) :
               ABIArgInfo::getDirect(Result, 0, nullptr, false);
  }

  if (const auto *EIT = Ty->getAs<ExtIntType>())
    if (EIT->getNumBits() > 64)
      return getIndirectByValue(Ty);

  return isPromotableIntegerTypeForABI(Ty)
             ? (FreeRegs >= SizeInRegs ? ABIArgInfo::getExtendInReg(Ty)
                                       : ABIArgInfo::getExtend(Ty))
             : (FreeRegs >= SizeInRegs ? ABIArgInfo::getDirectInReg()
                                       : ABIArgInfo::getDirect());
}

ABIArgInfo ARCABIInfo::classifyReturnType(QualType RetTy) const {
  if (RetTy->isAnyComplexType())
    return ABIArgInfo::getDirectInReg();

  // Values larger than 4 registers are returned indirectly.
  auto RetSize = llvm::alignTo(getContext().getTypeSize(RetTy), 32) / 32;
  if (RetSize > 4)
    return getIndirectByRef(RetTy, /*HasFreeRegs*/ true);

  return DefaultABIInfo::classifyReturnType(RetTy);
}
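
// For example (a sketch): 'double _Complex' is returned directly in registers
// via the complex case above, while a 20-byte struct (5 words) is always
// returned via an sret pointer.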

} // end anonymous namespace

//===----------------------------------------------------------------------===//
// XCore ABI Implementation
//===----------------------------------------------------------------------===//

namespace {

/// A SmallStringEnc instance is used to build up the TypeString by passing
/// it by reference between functions that append to it.
typedef llvm::SmallString<128> SmallStringEnc;

/// TypeStringCache caches the meta encodings of Types.
///
/// The reason for caching TypeStrings is twofold:
/// 1. To cache a type's encoding for later uses;
/// 2. As a means to break recursive member type inclusion.
///
/// A cache Entry can have a Status of:
/// NonRecursive: The type encoding is not recursive;
/// Recursive: The type encoding is recursive;
/// Incomplete: An incomplete TypeString;
/// IncompleteUsed: An incomplete TypeString that has been used in a
/// Recursive type encoding.
///
/// A NonRecursive entry will have all of its sub-members expanded as fully
/// as possible. Whilst it may contain types which are recursive, the type
/// itself is not recursive and thus its encoding may be safely used whenever
/// the type is encountered.
///
/// A Recursive entry will have all of its sub-members expanded as fully as
/// possible. The type itself is recursive and it may contain other types which
/// are recursive. The Recursive encoding must not be used during the expansion
/// of a recursive type's recursive branch. For simplicity the code uses
/// IncompleteCount to reject all usage of Recursive encodings for member types.
///
/// An Incomplete entry is always a RecordType and only encodes its
/// identifier, e.g. "s(S){}". Incomplete 'StubEnc' entries are ephemeral and
/// are placed into the cache during type expansion as a means to identify and
/// handle recursive inclusion of types as sub-members. If there is recursion
/// the entry becomes IncompleteUsed.
///
/// During the expansion of a RecordType's members:
///
/// If the cache contains a NonRecursive encoding for the member type, the
/// cached encoding is used;
///
/// If the cache contains a Recursive encoding for the member type, the
/// cached encoding is 'Swapped' out, as it may be incorrect, and...
///
/// If the member is a RecordType, an Incomplete encoding is placed into the
/// cache to break potential recursive inclusion of itself as a sub-member;
///
/// Once a member RecordType has been expanded, its temporary incomplete
/// entry is removed from the cache. If a Recursive encoding was swapped out
/// it is swapped back in;
///
/// If an incomplete entry is used to expand a sub-member, the incomplete
/// entry is marked as IncompleteUsed. The cache keeps count of how many
/// IncompleteUsed entries it currently contains in IncompleteUsedCount;
///
/// If a member's encoding is found to be NonRecursive or Recursive (viz:
/// IncompleteUsedCount == 0), the member's encoding is added to the cache.
/// Else the member is part of a recursive type and thus the recursion has
/// been exited too soon for the encoding to be correct for the member.
///
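/// As an illustration (a sketch; the exact grammar is defined by the XMOS
/// Tools Development Guide): encoding "struct S { struct S *next; };" first
/// caches the incomplete stub "s(S){}" so the member expansion terminates,
/// giving a final encoding along the lines of "s(S){m(next){p(s(S){})}}".
///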
class TypeStringCache {
  enum Status {NonRecursive, Recursive, Incomplete, IncompleteUsed};
  struct Entry {
    std::string Str;     // The encoded TypeString for the type.
    enum Status State;   // Information about the encoding in 'Str'.
    std::string Swapped; // A temporary place holder for a Recursive encoding
                         // during the expansion of RecordType's members.
  };
  std::map<const IdentifierInfo *, struct Entry> Map;
  unsigned IncompleteCount;     // Number of Incomplete entries in the Map.
  unsigned IncompleteUsedCount; // Number of IncompleteUsed entries in the Map.
public:
  TypeStringCache() : IncompleteCount(0), IncompleteUsedCount(0) {}
  void addIncomplete(const IdentifierInfo *ID, std::string StubEnc);
  bool removeIncomplete(const IdentifierInfo *ID);
  void addIfComplete(const IdentifierInfo *ID, StringRef Str,
                     bool IsRecursive);
  StringRef lookupStr(const IdentifierInfo *ID);
};

/// TypeString encodings for enum & union fields must be ordered.
/// FieldEncoding is a helper for this ordering process.
class FieldEncoding {
  bool HasName;
  std::string Enc;
public:
  FieldEncoding(bool b, SmallStringEnc &e) : HasName(b), Enc(e.c_str()) {}
  StringRef str() { return Enc; }
  bool operator<(const FieldEncoding &rhs) const {
    if (HasName != rhs.HasName) return HasName;
    return Enc < rhs.Enc;
  }
};

class XCoreABIInfo : public DefaultABIInfo {
public:
  XCoreABIInfo(CodeGen::CodeGenTypes &CGT) : DefaultABIInfo(CGT) {}
  Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
                    QualType Ty) const override;
};

class XCoreTargetCodeGenInfo : public TargetCodeGenInfo {
  mutable TypeStringCache TSC;
  void emitTargetMD(const Decl *D, llvm::GlobalValue *GV,
                    const CodeGen::CodeGenModule &M) const;

public:
  XCoreTargetCodeGenInfo(CodeGenTypes &CGT)
      : TargetCodeGenInfo(std::make_unique<XCoreABIInfo>(CGT)) {}
  void emitTargetMetadata(CodeGen::CodeGenModule &CGM,
                          const llvm::MapVector<GlobalDecl, StringRef>
                              &MangledDeclNames) const override;
};

} // end anonymous namespace

// TODO: this implementation is likely now redundant with the default
// EmitVAArg.
Address XCoreABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
                                QualType Ty) const {
  CGBuilderTy &Builder = CGF.Builder;

  // Get the VAList.
  CharUnits SlotSize = CharUnits::fromQuantity(4);
  Address AP(Builder.CreateLoad(VAListAddr), SlotSize);

  // Handle the argument.
  ABIArgInfo AI = classifyArgumentType(Ty);
  CharUnits TypeAlign = getContext().getTypeAlignInChars(Ty);
  llvm::Type *ArgTy = CGT.ConvertType(Ty);
  if (AI.canHaveCoerceToType() && !AI.getCoerceToType())
    AI.setCoerceToType(ArgTy);
  llvm::Type *ArgPtrTy = llvm::PointerType::getUnqual(ArgTy);

  Address Val = Address::invalid();
  CharUnits ArgSize = CharUnits::Zero();
  switch (AI.getKind()) {
  case ABIArgInfo::Expand:
  case ABIArgInfo::CoerceAndExpand:
  case ABIArgInfo::InAlloca:
    llvm_unreachable("Unsupported ABI kind for va_arg");
  case ABIArgInfo::Ignore:
    Val = Address(llvm::UndefValue::get(ArgPtrTy), TypeAlign);
    ArgSize = CharUnits::Zero();
    break;
  case ABIArgInfo::Extend:
  case ABIArgInfo::Direct:
    Val = Builder.CreateBitCast(AP, ArgPtrTy);
    ArgSize = CharUnits::fromQuantity(
        getDataLayout().getTypeAllocSize(AI.getCoerceToType()));
    ArgSize = ArgSize.alignTo(SlotSize);
    break;
  case ABIArgInfo::Indirect:
  case ABIArgInfo::IndirectAliased:
    Val = Builder.CreateElementBitCast(AP, ArgPtrTy);
    Val = Address(Builder.CreateLoad(Val), TypeAlign);
    ArgSize = SlotSize;
    break;
  }

  // Increment the VAList.
  if (!ArgSize.isZero()) {
    Address APN = Builder.CreateConstInBoundsByteGEP(AP, ArgSize);
    Builder.CreateStore(APN.getPointer(), VAListAddr);
  }

  return Val;
}

/// During the expansion of a RecordType, an incomplete TypeString is placed
/// into the cache as a means to identify and break recursion.
/// If there is a Recursive encoding in the cache, it is swapped out and will
/// be reinserted by removeIncomplete().
/// All other types of encoding should have been used rather than arriving here.
void TypeStringCache::addIncomplete(const IdentifierInfo *ID,
                                    std::string StubEnc) {
  if (!ID)
    return;
  Entry &E = Map[ID];
  assert((E.Str.empty() || E.State == Recursive) &&
         "Incorrect use of addIncomplete");
  assert(!StubEnc.empty() && "Passing an empty string to addIncomplete()");
  E.Swapped.swap(E.Str); // Swap out the Recursive encoding.
  E.Str.swap(StubEnc);
  E.State = Incomplete;
  ++IncompleteCount;
}

/// Once the RecordType has been expanded, the temporary incomplete TypeString
/// must be removed from the cache.
/// If a Recursive was swapped out by addIncomplete(), it will be replaced.
/// Returns true if the RecordType was defined recursively.
bool TypeStringCache::removeIncomplete(const IdentifierInfo *ID) {
  if (!ID)
    return false;
  auto I = Map.find(ID);
  assert(I != Map.end() && "Entry not present");
  Entry &E = I->second;
  assert((E.State == Incomplete ||
          E.State == IncompleteUsed) &&
         "Entry must be an incomplete type");
  bool IsRecursive = false;
  if (E.State == IncompleteUsed) {
    // We made use of our Incomplete encoding, thus we are recursive.
    IsRecursive = true;
    --IncompleteUsedCount;
  }
  if (E.Swapped.empty())
    Map.erase(I);
  else {
    // Swap the Recursive encoding back in.
    E.Swapped.swap(E.Str);
    E.Swapped.clear();
    E.State = Recursive;
  }
  --IncompleteCount;
  return IsRecursive;
}

/// Add the encoded TypeString to the cache only if it is NonRecursive or
/// Recursive (viz: all sub-members were expanded as fully as possible).
void TypeStringCache::addIfComplete(const IdentifierInfo *ID, StringRef Str,
                                    bool IsRecursive) {
  if (!ID || IncompleteUsedCount)
    return; // No key or it is an incomplete sub-type so don't add.
  Entry &E = Map[ID];
  if (IsRecursive && !E.Str.empty()) {
    assert(E.State == Recursive && E.Str.size() == Str.size() &&
           "This is not the same Recursive entry");
    // The parent container was not recursive after all, so we could have used
    // this Recursive sub-member entry after all, but we assumed the worst when
    // we started viz: IncompleteCount != 0.
    return;
  }
  assert(E.Str.empty() && "Entry already present");
  E.Str = Str.str();
  E.State = IsRecursive ? Recursive : NonRecursive;
}

/// Return a cached TypeString encoding for the ID. If there isn't one, or we
/// are recursively expanding a type (IncompleteCount != 0) and the cached
/// encoding is Recursive, return an empty StringRef.
StringRef TypeStringCache::lookupStr(const IdentifierInfo *ID) {
  if (!ID)
    return StringRef(); // We have no key.
  auto I = Map.find(ID);
  if (I == Map.end())
    return StringRef(); // We have no encoding.
  Entry &E = I->second;
  if (E.State == Recursive && IncompleteCount)
    return StringRef(); // We don't use Recursive encodings for member types.

  if (E.State == Incomplete) {
    // The incomplete type is being used to break out of recursion.
    E.State = IncompleteUsed;
    ++IncompleteUsedCount;
  }
  return E.Str;
}

/// The XCore ABI includes a type information section that communicates symbol
/// type information to the linker. The linker uses this information to verify
/// the safety/correctness of things such as array bounds and pointers et al.
/// The ABI only requires C (and XC) language modules to emit TypeStrings.
/// This type information (TypeString) is emitted into meta data for all global
/// symbols: definitions, declarations, functions & variables.
///
/// The TypeString carries type, qualifier, name, size & value details.
/// Please see 'Tools Development Guide' section 2.16.2 for format details:
/// https://www.xmos.com/download/public/Tools-Development-Guide%28X9114A%29.pdf
/// The output is tested by test/CodeGen/xcore-stringtype.c.
///
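/// For example (a sketch; see the test file for authoritative output), a
/// function 'int f(int)' is annotated with metadata pairing the function with
/// a TypeString along the lines of "f{si}(si)".
///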
static bool getTypeString(SmallStringEnc &Enc, const Decl *D,
                          const CodeGen::CodeGenModule &CGM,
                          TypeStringCache &TSC);

/// XCore uses emitTargetMD to emit TypeString metadata for global symbols.
void XCoreTargetCodeGenInfo::emitTargetMD(
    const Decl *D, llvm::GlobalValue *GV,
    const CodeGen::CodeGenModule &CGM) const {
  SmallStringEnc Enc;
  if (getTypeString(Enc, D, CGM, TSC)) {
    llvm::LLVMContext &Ctx = CGM.getModule().getContext();
    llvm::Metadata *MDVals[] = {llvm::ConstantAsMetadata::get(GV),
                                llvm::MDString::get(Ctx, Enc.str())};
    llvm::NamedMDNode *MD =
        CGM.getModule().getOrInsertNamedMetadata("xcore.typestrings");
    MD->addOperand(llvm::MDNode::get(Ctx, MDVals));
  }
}

void XCoreTargetCodeGenInfo::emitTargetMetadata(
    CodeGen::CodeGenModule &CGM,
    const llvm::MapVector<GlobalDecl, StringRef> &MangledDeclNames) const {
  // Warning: new entries may be appended to MangledDeclNames within this
  // loop. We rely on MapVector insertions adding new elements to the end
  // of the container.
  for (unsigned I = 0; I != MangledDeclNames.size(); ++I) {
    auto Val = *(MangledDeclNames.begin() + I);
    llvm::GlobalValue *GV = CGM.GetGlobalValue(Val.second);
    if (GV) {
      const Decl *D = Val.first.getDecl()->getMostRecentDecl();
      emitTargetMD(D, GV, CGM);
    }
  }
}

//===----------------------------------------------------------------------===//
// SPIR ABI Implementation
//===----------------------------------------------------------------------===//

namespace {
class SPIRABIInfo : public DefaultABIInfo {
public:
  SPIRABIInfo(CodeGenTypes &CGT) : DefaultABIInfo(CGT) { setCCs(); }

private:
  void setCCs();
};
} // end anonymous namespace

namespace {
class SPIRTargetCodeGenInfo : public TargetCodeGenInfo {
public:
  SPIRTargetCodeGenInfo(CodeGen::CodeGenTypes &CGT)
      : TargetCodeGenInfo(std::make_unique<SPIRABIInfo>(CGT)) {}
  unsigned getOpenCLKernelCallingConv() const override;
};
} // end anonymous namespace

void SPIRABIInfo::setCCs() {
  assert(getRuntimeCC() == llvm::CallingConv::C);
  RuntimeCC = llvm::CallingConv::SPIR_FUNC;
}

namespace clang {
namespace CodeGen {
void computeSPIRKernelABIInfo(CodeGenModule &CGM, CGFunctionInfo &FI) {
  DefaultABIInfo SPIRABI(CGM.getTypes());
  SPIRABI.computeInfo(FI);
}
} // namespace CodeGen
} // namespace clang

unsigned SPIRTargetCodeGenInfo::getOpenCLKernelCallingConv() const {
  return llvm::CallingConv::SPIR_KERNEL;
}

static bool appendType(SmallStringEnc &Enc, QualType QType,
                       const CodeGen::CodeGenModule &CGM,
                       TypeStringCache &TSC);

/// Helper function for appendRecordType().
/// Builds a SmallVector containing the encoded field types in declaration
/// order.
static bool extractFieldType(SmallVectorImpl<FieldEncoding> &FE,
                             const RecordDecl *RD,
                             const CodeGen::CodeGenModule &CGM,
                             TypeStringCache &TSC) {
  for (const auto *Field : RD->fields()) {
    SmallStringEnc Enc;
    Enc += "m(";
    Enc += Field->getName();
    Enc += "){";
    if (Field->isBitField()) {
      Enc += "b(";
      llvm::raw_svector_ostream OS(Enc);
      OS << Field->getBitWidthValue(CGM.getContext());
      Enc += ':';
    }
    if (!appendType(Enc, Field->getType(), CGM, TSC))
      return false;
    if (Field->isBitField())
      Enc += ')';
    Enc += '}';
    FE.emplace_back(!Field->getName().empty(), Enc);
  }
  return true;
}

/// Appends structure and union types to Enc and adds the encoding to the
/// cache. Recursively calls appendType (via extractFieldType) for each field.
/// Union types have their fields ordered according to the ABI.
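/// For example (per the rules implemented below), "struct S { int a; char b; }"
/// encodes roughly as "s(S){m(a){si},m(b){sc}}". A self-referential type such
/// as "struct N { struct N *next; }" first caches the incomplete stub "s(N){}"
/// so that the recursive reference below can terminate.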
static bool appendRecordType(SmallStringEnc &Enc, const RecordType *RT,
                             const CodeGen::CodeGenModule &CGM,
                             TypeStringCache &TSC, const IdentifierInfo *ID) {
  // Append the cached TypeString if we have one.
  StringRef TypeString = TSC.lookupStr(ID);
  if (!TypeString.empty()) {
    Enc += TypeString;
    return true;
  }

  // Start to emit an incomplete TypeString.
  size_t Start = Enc.size();
  Enc += (RT->isUnionType() ? 'u' : 's');
  Enc += '(';
  if (ID)
    Enc += ID->getName();
  Enc += "){";

  // We collect all encoded fields and order as necessary.
  bool IsRecursive = false;
  const RecordDecl *RD = RT->getDecl()->getDefinition();
  if (RD && !RD->field_empty()) {
    // An incomplete TypeString stub is placed in the cache for this RecordType
    // so that recursive calls to this RecordType will use it whilst building a
    // complete TypeString for this RecordType.
    SmallVector<FieldEncoding, 16> FE;
    std::string StubEnc(Enc.substr(Start).str());
    StubEnc += '}'; // StubEnc now holds a valid incomplete TypeString.
    TSC.addIncomplete(ID, std::move(StubEnc));
    if (!extractFieldType(FE, RD, CGM, TSC)) {
      (void)TSC.removeIncomplete(ID);
      return false;
    }
    IsRecursive = TSC.removeIncomplete(ID);
    // The ABI requires unions to be sorted but not structures.
    // See FieldEncoding::operator< for the sort algorithm.
    if (RT->isUnionType())
      llvm::sort(FE);
    // We can now complete the TypeString.
    unsigned E = FE.size();
    for (unsigned I = 0; I != E; ++I) {
      if (I)
        Enc += ',';
      Enc += FE[I].str();
    }
  }
  Enc += '}';
  TSC.addIfComplete(ID, Enc.substr(Start), IsRecursive);
  return true;
}

/// Appends enum types to Enc and adds the encoding to the cache.
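/// For example, "enum E { B = 2, A = 1 };" encodes roughly as
/// "e(E){m(A){1},m(B){2}}", since enumerators are sorted alphanumerically.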
static bool appendEnumType(SmallStringEnc &Enc, const EnumType *ET,
                           TypeStringCache &TSC,
                           const IdentifierInfo *ID) {
  // Append the cached TypeString if we have one.
  StringRef TypeString = TSC.lookupStr(ID);
  if (!TypeString.empty()) {
    Enc += TypeString;
    return true;
  }

  size_t Start = Enc.size();
  Enc += "e(";
  if (ID)
    Enc += ID->getName();
  Enc += "){";

  // We collect all encoded enumerations and order them alphanumerically.
  if (const EnumDecl *ED = ET->getDecl()->getDefinition()) {
    SmallVector<FieldEncoding, 16> FE;
    for (auto I = ED->enumerator_begin(), E = ED->enumerator_end(); I != E;
         ++I) {
      SmallStringEnc EnumEnc;
      EnumEnc += "m(";
      EnumEnc += I->getName();
      EnumEnc += "){";
      I->getInitVal().toString(EnumEnc);
      EnumEnc += '}';
      FE.push_back(FieldEncoding(!I->getName().empty(), EnumEnc));
    }
    llvm::sort(FE);
    unsigned E = FE.size();
    for (unsigned I = 0; I != E; ++I) {
      if (I)
        Enc += ',';
      Enc += FE[I].str();
    }
  }
  Enc += '}';
  TSC.addIfComplete(ID, Enc.substr(Start), false);
  return true;
}

/// Appends a type's qualifiers to Enc.
/// This is done prior to appending the type's encoding.
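/// For example, "const volatile int" yields the prefix "cv:", and a
/// restrict-qualified pointer yields "r:"; unqualified types get no prefix.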
static void appendQualifier(SmallStringEnc &Enc, QualType QT) {
  // Qualifiers are emitted in alphabetical order.
  static const char *const Table[] = {"",   "c:",  "r:",  "cr:",
                                      "v:", "cv:", "rv:", "crv:"};
  int Lookup = 0;
  if (QT.isConstQualified())
    Lookup += 1 << 0;
  if (QT.isRestrictQualified())
    Lookup += 1 << 1;
  if (QT.isVolatileQualified())
    Lookup += 1 << 2;
  Enc += Table[Lookup];
}

/// Appends built-in types to Enc.
static bool appendBuiltinType(SmallStringEnc &Enc, const BuiltinType *BT) {
  const char *EncType;
  switch (BT->getKind()) {
  case BuiltinType::Void:
    EncType = "0";
    break;
  case BuiltinType::Bool:
    EncType = "b";
    break;
  case BuiltinType::Char_U:
  case BuiltinType::UChar:
    EncType = "uc";
    break;
  case BuiltinType::SChar:
    EncType = "sc";
    break;
  case BuiltinType::UShort:
    EncType = "us";
    break;
  case BuiltinType::Short:
    EncType = "ss";
    break;
  case BuiltinType::UInt:
    EncType = "ui";
    break;
  case BuiltinType::Int:
    EncType = "si";
    break;
  case BuiltinType::ULong:
    EncType = "ul";
    break;
  case BuiltinType::Long:
    EncType = "sl";
    break;
  case BuiltinType::ULongLong:
    EncType = "ull";
    break;
  case BuiltinType::LongLong:
    EncType = "sll";
    break;
  case BuiltinType::Float:
    EncType = "ft";
    break;
  case BuiltinType::Double:
    EncType = "d";
    break;
  case BuiltinType::LongDouble:
    EncType = "ld";
    break;
  default:
    return false;
  }
  Enc += EncType;
  return true;
}

/// Appends a pointer encoding to Enc before calling appendType for the pointee.
static bool appendPointerType(SmallStringEnc &Enc, const PointerType *PT,
                              const CodeGen::CodeGenModule &CGM,
                              TypeStringCache &TSC) {
  Enc += "p(";
  if (!appendType(Enc, PT->getPointeeType(), CGM, TSC))
    return false;
  Enc += ')';
  return true;
}

/// Appends an array encoding to Enc before calling appendType for the element.
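/// For example, "int A[10]" encodes as "a(10:si)"; a global array of unknown
/// size uses "*" in place of the count, e.g. "a(*:si)".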
static bool appendArrayType(SmallStringEnc &Enc, QualType QT,
                            const ArrayType *AT,
                            const CodeGen::CodeGenModule &CGM,
                            TypeStringCache &TSC, StringRef NoSizeEnc) {
  if (AT->getSizeModifier() != ArrayType::Normal)
    return false;
  Enc += "a(";
  if (const ConstantArrayType *CAT = dyn_cast<ConstantArrayType>(AT))
    CAT->getSize().toStringUnsigned(Enc);
  else
    Enc += NoSizeEnc; // Global arrays use "*", otherwise it is "".
  Enc += ':';
  // The Qualifiers should be attached to the type rather than the array.
  appendQualifier(Enc, QT);
  if (!appendType(Enc, AT->getElementType(), CGM, TSC))
    return false;
  Enc += ')';
  return true;
}

/// Appends a function encoding to Enc, calling appendType for the return type
/// and the arguments.
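/// For example, "int f(char, ...)" encodes as "f{si}(sc,va)"; a prototyped
/// function with no parameters encodes its parameter list as "0", e.g.
/// "f{si}(0)".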
static bool appendFunctionType(SmallStringEnc &Enc, const FunctionType *FT,
                               const CodeGen::CodeGenModule &CGM,
                               TypeStringCache &TSC) {
  Enc += "f{";
  if (!appendType(Enc, FT->getReturnType(), CGM, TSC))
    return false;
  Enc += "}(";
  if (const FunctionProtoType *FPT = FT->getAs<FunctionProtoType>()) {
    // N.B. we are only interested in the adjusted param types.
    auto I = FPT->param_type_begin();
    auto E = FPT->param_type_end();
    if (I != E) {
      do {
        if (!appendType(Enc, *I, CGM, TSC))
          return false;
        ++I;
        if (I != E)
          Enc += ',';
      } while (I != E);
      if (FPT->isVariadic())
        Enc += ",va";
    } else {
      if (FPT->isVariadic())
        Enc += "va";
      else
        Enc += '0';
    }
  }
  Enc += ')';
  return true;
}

/// Handles the type's qualifier before dispatching a call to handle specific
/// type encodings.
static bool appendType(SmallStringEnc &Enc, QualType QType,
                       const CodeGen::CodeGenModule &CGM,
                       TypeStringCache &TSC) {
  QualType QT = QType.getCanonicalType();

  if (const ArrayType *AT = QT->getAsArrayTypeUnsafe())
    // The Qualifiers should be attached to the type rather than the array.
    // Thus we don't call appendQualifier() here.
    return appendArrayType(Enc, QT, AT, CGM, TSC, "");

  appendQualifier(Enc, QT);

  if (const BuiltinType *BT = QT->getAs<BuiltinType>())
    return appendBuiltinType(Enc, BT);

  if (const PointerType *PT = QT->getAs<PointerType>())
    return appendPointerType(Enc, PT, CGM, TSC);

  if (const EnumType *ET = QT->getAs<EnumType>())
    return appendEnumType(Enc, ET, TSC, QT.getBaseTypeIdentifier());

  if (const RecordType *RT = QT->getAsStructureType())
    return appendRecordType(Enc, RT, CGM, TSC, QT.getBaseTypeIdentifier());

  if (const RecordType *RT = QT->getAsUnionType())
    return appendRecordType(Enc, RT, CGM, TSC, QT.getBaseTypeIdentifier());

  if (const FunctionType *FT = QT->getAs<FunctionType>())
    return appendFunctionType(Enc, FT, CGM, TSC);

  return false;
}

static bool getTypeString(SmallStringEnc &Enc, const Decl *D,
                          const CodeGen::CodeGenModule &CGM,
                          TypeStringCache &TSC) {
  if (!D)
    return false;

  if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(D)) {
    if (FD->getLanguageLinkage() != CLanguageLinkage)
      return false;
    return appendType(Enc, FD->getType(), CGM, TSC);
  }

  if (const VarDecl *VD = dyn_cast<VarDecl>(D)) {
    if (VD->getLanguageLinkage() != CLanguageLinkage)
      return false;
    QualType QT = VD->getType().getCanonicalType();
    if (const ArrayType *AT = QT->getAsArrayTypeUnsafe()) {
      // Global ArrayTypes are given a size of '*' if the size is unknown.
      // The Qualifiers should be attached to the type rather than the array.
      // Thus we don't call appendQualifier() here.
      return appendArrayType(Enc, QT, AT, CGM, TSC, "*");
    }
    return appendType(Enc, QT, CGM, TSC);
  }
  return false;
}

//===----------------------------------------------------------------------===//
// RISCV ABI Implementation
//===----------------------------------------------------------------------===//

namespace {
class RISCVABIInfo : public DefaultABIInfo {
private:
  // Size of the integer ('x') registers in bits.
  unsigned XLen;
  // Size of the floating point ('f') registers in bits. Note that the target
  // ISA might have a wider FLen than the selected ABI (e.g. an RV32IF target
  // with soft float ABI has FLen==0).
  unsigned FLen;
  static const int NumArgGPRs = 8;
  static const int NumArgFPRs = 8;
  bool detectFPCCEligibleStructHelper(QualType Ty, CharUnits CurOff,
                                      llvm::Type *&Field1Ty,
                                      CharUnits &Field1Off,
                                      llvm::Type *&Field2Ty,
                                      CharUnits &Field2Off) const;

public:
  RISCVABIInfo(CodeGen::CodeGenTypes &CGT, unsigned XLen, unsigned FLen)
      : DefaultABIInfo(CGT), XLen(XLen), FLen(FLen) {}

  // DefaultABIInfo's classifyReturnType and classifyArgumentType are
  // non-virtual, but computeInfo is virtual, so we override it.
  void computeInfo(CGFunctionInfo &FI) const override;

  ABIArgInfo classifyArgumentType(QualType Ty, bool IsFixed, int &ArgGPRsLeft,
                                  int &ArgFPRsLeft) const;
  ABIArgInfo classifyReturnType(QualType RetTy) const;

  Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
                    QualType Ty) const override;

  ABIArgInfo extendType(QualType Ty) const;

  bool detectFPCCEligibleStruct(QualType Ty, llvm::Type *&Field1Ty,
                                CharUnits &Field1Off, llvm::Type *&Field2Ty,
                                CharUnits &Field2Off, int &NeededArgGPRs,
                                int &NeededArgFPRs) const;
  ABIArgInfo coerceAndExpandFPCCEligibleStruct(llvm::Type *Field1Ty,
                                               CharUnits Field1Off,
                                               llvm::Type *Field2Ty,
                                               CharUnits Field2Off) const;
};
} // end anonymous namespace

void RISCVABIInfo::computeInfo(CGFunctionInfo &FI) const {
  QualType RetTy = FI.getReturnType();
  if (!getCXXABI().classifyReturnType(FI))
    FI.getReturnInfo() = classifyReturnType(RetTy);

  // IsRetIndirect is true if classifyArgumentType indicated the value should
  // be passed indirect, or if the type is a scalar larger than 2*XLen and not
  // a complex type whose elements fit in FLen. E.g. fp128 is passed direct in
  // LLVM IR, relying on the backend lowering code to rewrite the argument
  // list and pass indirectly on RV32.
  bool IsRetIndirect = FI.getReturnInfo().getKind() == ABIArgInfo::Indirect;
  if (!IsRetIndirect && RetTy->isScalarType() &&
      getContext().getTypeSize(RetTy) > (2 * XLen)) {
    if (RetTy->isComplexType() && FLen) {
      QualType EltTy = RetTy->castAs<ComplexType>()->getElementType();
      IsRetIndirect = getContext().getTypeSize(EltTy) > FLen;
    } else {
      // This is a normal scalar > 2*XLen, such as fp128 on RV32.
      IsRetIndirect = true;
    }
  }

  // We must track the number of GPRs used in order to conform to the RISC-V
  // ABI, as integer scalars passed in registers should have signext/zeroext
  // when promoted, but are anyext if passed on the stack. As GPR usage is
  // different for variadic arguments, we must also track whether we are
  // examining a vararg or not.
  int ArgGPRsLeft = IsRetIndirect ? NumArgGPRs - 1 : NumArgGPRs;
  int ArgFPRsLeft = FLen ? NumArgFPRs : 0;
  int NumFixedArgs = FI.getNumRequiredArgs();

  int ArgNum = 0;
  for (auto &ArgInfo : FI.arguments()) {
    bool IsFixed = ArgNum < NumFixedArgs;
    ArgInfo.info =
        classifyArgumentType(ArgInfo.type, IsFixed, ArgGPRsLeft, ArgFPRsLeft);
    ArgNum++;
  }
}

// Returns true if the struct is a potential candidate for the floating point
// calling convention. If this function returns true, the caller is
// responsible for checking that if there is only a single field then that
// field is a float.
bool RISCVABIInfo::detectFPCCEligibleStructHelper(QualType Ty, CharUnits CurOff,
                                                  llvm::Type *&Field1Ty,
                                                  CharUnits &Field1Off,
                                                  llvm::Type *&Field2Ty,
                                                  CharUnits &Field2Off) const {
  bool IsInt = Ty->isIntegralOrEnumerationType();
  bool IsFloat = Ty->isRealFloatingType();

  if (IsInt || IsFloat) {
    uint64_t Size = getContext().getTypeSize(Ty);
    if (IsInt && Size > XLen)
      return false;
    // Can't be eligible if larger than the FP registers. Half precision isn't
    // currently supported on RISC-V and the ABI hasn't been confirmed, so
    // default to the integer ABI in that case.
    if (IsFloat && (Size > FLen || Size < 32))
      return false;
    // Can't be eligible if an integer type was already found (int+int pairs
    // are not eligible).
    if (IsInt && Field1Ty && Field1Ty->isIntegerTy())
      return false;
    if (!Field1Ty) {
      Field1Ty = CGT.ConvertType(Ty);
      Field1Off = CurOff;
      return true;
    }
    if (!Field2Ty) {
      Field2Ty = CGT.ConvertType(Ty);
      Field2Off = CurOff;
      return true;
    }
    return false;
  }

  if (auto CTy = Ty->getAs<ComplexType>()) {
    if (Field1Ty)
      return false;
    QualType EltTy = CTy->getElementType();
    if (getContext().getTypeSize(EltTy) > FLen)
      return false;
    Field1Ty = CGT.ConvertType(EltTy);
    Field1Off = CurOff;
    Field2Ty = Field1Ty;
    Field2Off = Field1Off + getContext().getTypeSizeInChars(EltTy);
    return true;
  }

  if (const ConstantArrayType *ATy = getContext().getAsConstantArrayType(Ty)) {
    uint64_t ArraySize = ATy->getSize().getZExtValue();
    QualType EltTy = ATy->getElementType();
    CharUnits EltSize = getContext().getTypeSizeInChars(EltTy);
    for (uint64_t i = 0; i < ArraySize; ++i) {
      bool Ret = detectFPCCEligibleStructHelper(EltTy, CurOff, Field1Ty,
                                                Field1Off, Field2Ty, Field2Off);
      if (!Ret)
        return false;
      CurOff += EltSize;
    }
    return true;
  }

  if (const auto *RTy = Ty->getAs<RecordType>()) {
    // Structures with either a non-trivial destructor or a non-trivial
    // copy constructor are not eligible for the FP calling convention.
    if (getRecordArgABI(Ty, CGT.getCXXABI()))
      return false;
    if (isEmptyRecord(getContext(), Ty, true))
      return true;
    const RecordDecl *RD = RTy->getDecl();
    // Unions aren't eligible unless they're empty (which is caught above).
    if (RD->isUnion())
      return false;
    int ZeroWidthBitFieldCount = 0;
    for (const FieldDecl *FD : RD->fields()) {
      const ASTRecordLayout &Layout = getContext().getASTRecordLayout(RD);
      uint64_t FieldOffInBits = Layout.getFieldOffset(FD->getFieldIndex());
      QualType QTy = FD->getType();
      if (FD->isBitField()) {
        unsigned BitWidth = FD->getBitWidthValue(getContext());
        // Allow a bitfield with a type greater than XLen as long as the
        // bitwidth is XLen or less.
        if (getContext().getTypeSize(QTy) > XLen && BitWidth <= XLen)
          QTy = getContext().getIntTypeForBitwidth(XLen, false);
        if (BitWidth == 0) {
          ZeroWidthBitFieldCount++;
          continue;
        }
      }

      bool Ret = detectFPCCEligibleStructHelper(
          QTy, CurOff + getContext().toCharUnitsFromBits(FieldOffInBits),
          Field1Ty, Field1Off, Field2Ty, Field2Off);
      if (!Ret)
        return false;

      // As a quirk of the ABI, zero-width bitfields aren't ignored for fp+fp
      // or int+fp structs, but are ignored for a struct with an fp field and
      // any number of zero-width bitfields.
      if (Field2Ty && ZeroWidthBitFieldCount > 0)
        return false;
    }
    return Field1Ty != nullptr;
  }

  return false;
}

// Determine if a struct is eligible for passing according to the floating
// point calling convention (i.e., when flattened it contains a single fp
// value, fp+fp, or int+fp of appropriate size). If so, NeededArgFPRs and
// NeededArgGPRs are incremented appropriately.
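// For example (assuming an ABI with FLen >= 64), "struct { double d; }" and
// "struct { double d; int i; }" are both eligible (one FPR, and one FPR plus
// one GPR, respectively), while "struct { int a; int b; }" is not, since
// int+int pairs never qualify.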
bool RISCVABIInfo::detectFPCCEligibleStruct(QualType Ty, llvm::Type *&Field1Ty,
                                            CharUnits &Field1Off,
                                            llvm::Type *&Field2Ty,
                                            CharUnits &Field2Off,
                                            int &NeededArgGPRs,
                                            int &NeededArgFPRs) const {
  Field1Ty = nullptr;
  Field2Ty = nullptr;
  NeededArgGPRs = 0;
  NeededArgFPRs = 0;
  bool IsCandidate = detectFPCCEligibleStructHelper(
      Ty, CharUnits::Zero(), Field1Ty, Field1Off, Field2Ty, Field2Off);
  // Not really a candidate if we have a single int but no float.
  if (Field1Ty && !Field2Ty && !Field1Ty->isFloatingPointTy())
    return false;
  if (!IsCandidate)
    return false;
  if (Field1Ty && Field1Ty->isFloatingPointTy())
    NeededArgFPRs++;
  else if (Field1Ty)
    NeededArgGPRs++;
  if (Field2Ty && Field2Ty->isFloatingPointTy())
    NeededArgFPRs++;
  else if (Field2Ty)
    NeededArgGPRs++;
  return true;
}

// Call getCoerceAndExpand for the two-element flattened struct described by
// Field1Ty, Field1Off, Field2Ty, Field2Off. This method will create an
// appropriate coerceToType and unpaddedCoerceToType.
ABIArgInfo RISCVABIInfo::coerceAndExpandFPCCEligibleStruct(
    llvm::Type *Field1Ty, CharUnits Field1Off, llvm::Type *Field2Ty,
    CharUnits Field2Off) const {
  SmallVector<llvm::Type *, 3> CoerceElts;
  SmallVector<llvm::Type *, 2> UnpaddedCoerceElts;
  if (!Field1Off.isZero())
    CoerceElts.push_back(llvm::ArrayType::get(
        llvm::Type::getInt8Ty(getVMContext()), Field1Off.getQuantity()));

  CoerceElts.push_back(Field1Ty);
  UnpaddedCoerceElts.push_back(Field1Ty);

  if (!Field2Ty) {
    return ABIArgInfo::getCoerceAndExpand(
        llvm::StructType::get(getVMContext(), CoerceElts, !Field1Off.isZero()),
        UnpaddedCoerceElts[0]);
  }

  CharUnits Field2Align =
      CharUnits::fromQuantity(getDataLayout().getABITypeAlignment(Field2Ty));
  CharUnits Field1End =
      Field1Off +
      CharUnits::fromQuantity(getDataLayout().getTypeStoreSize(Field1Ty));
  CharUnits Field2OffNoPadNoPack = Field1End.alignTo(Field2Align);

  CharUnits Padding = CharUnits::Zero();
  if (Field2Off > Field2OffNoPadNoPack)
    Padding = Field2Off - Field2OffNoPadNoPack;
  else if (Field2Off != Field2Align && Field2Off > Field1End)
    Padding = Field2Off - Field1End;

  bool IsPacked = !Field2Off.isMultipleOf(Field2Align);

  if (!Padding.isZero())
    CoerceElts.push_back(llvm::ArrayType::get(
        llvm::Type::getInt8Ty(getVMContext()), Padding.getQuantity()));

  CoerceElts.push_back(Field2Ty);
  UnpaddedCoerceElts.push_back(Field2Ty);

  auto CoerceToType =
      llvm::StructType::get(getVMContext(), CoerceElts, IsPacked);
  auto UnpaddedCoerceToType =
      llvm::StructType::get(getVMContext(), UnpaddedCoerceElts, IsPacked);

  return ABIArgInfo::getCoerceAndExpand(CoerceToType, UnpaddedCoerceToType);
}

ABIArgInfo RISCVABIInfo::classifyArgumentType(QualType Ty, bool IsFixed,
                                              int &ArgGPRsLeft,
                                              int &ArgFPRsLeft) const {
  assert(ArgGPRsLeft <= NumArgGPRs && "Arg GPR tracking underflow");
  Ty = useFirstFieldIfTransparentUnion(Ty);

  // Structures with either a non-trivial destructor or a non-trivial
  // copy constructor are always passed indirectly.
  if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, getCXXABI())) {
    if (ArgGPRsLeft)
      ArgGPRsLeft -= 1;
    return getNaturalAlignIndirect(Ty, /*ByVal=*/RAA ==
                                           CGCXXABI::RAA_DirectInMemory);
  }

  // Ignore empty structs/unions.
  if (isEmptyRecord(getContext(), Ty, true))
    return ABIArgInfo::getIgnore();

  uint64_t Size = getContext().getTypeSize(Ty);

  // Pass floating point values via FPRs if possible.
  if (IsFixed && Ty->isFloatingType() && !Ty->isComplexType() &&
      FLen >= Size && ArgFPRsLeft) {
    ArgFPRsLeft--;
    return ABIArgInfo::getDirect();
  }

  // Complex types for the hard float ABI must be passed direct rather than
  // using CoerceAndExpand.
  if (IsFixed && Ty->isComplexType() && FLen && ArgFPRsLeft >= 2) {
    QualType EltTy = Ty->castAs<ComplexType>()->getElementType();
    if (getContext().getTypeSize(EltTy) <= FLen) {
      ArgFPRsLeft -= 2;
      return ABIArgInfo::getDirect();
    }
  }

  if (IsFixed && FLen && Ty->isStructureOrClassType()) {
    llvm::Type *Field1Ty = nullptr;
    llvm::Type *Field2Ty = nullptr;
    CharUnits Field1Off = CharUnits::Zero();
    CharUnits Field2Off = CharUnits::Zero();
    int NeededArgGPRs;
    int NeededArgFPRs;
    bool IsCandidate =
        detectFPCCEligibleStruct(Ty, Field1Ty, Field1Off, Field2Ty, Field2Off,
                                 NeededArgGPRs, NeededArgFPRs);
    if (IsCandidate && NeededArgGPRs <= ArgGPRsLeft &&
        NeededArgFPRs <= ArgFPRsLeft) {
      ArgGPRsLeft -= NeededArgGPRs;
      ArgFPRsLeft -= NeededArgFPRs;
      return coerceAndExpandFPCCEligibleStruct(Field1Ty, Field1Off, Field2Ty,
                                               Field2Off);
    }
  }

  uint64_t NeededAlign = getContext().getTypeAlign(Ty);
  bool MustUseStack = false;
  // Determine the number of GPRs needed to pass the current argument
  // according to the ABI. 2*XLen-aligned varargs are passed in "aligned"
  // register pairs, so may consume 3 registers.
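  // For example, on RV32 a variadic "long long" (64-bit alignment == 2*XLen)
  // passed when an odd number of GPRs remain must skip a register to restore
  // even alignment, consuming three GPRs in total.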
  int NeededArgGPRs = 1;
  if (!IsFixed && NeededAlign == 2 * XLen)
    NeededArgGPRs = 2 + (ArgGPRsLeft % 2);
  else if (Size > XLen && Size <= 2 * XLen)
    NeededArgGPRs = 2;

  if (NeededArgGPRs > ArgGPRsLeft) {
    MustUseStack = true;
    NeededArgGPRs = ArgGPRsLeft;
  }

  ArgGPRsLeft -= NeededArgGPRs;

  if (!isAggregateTypeForABI(Ty) && !Ty->isVectorType()) {
    // Treat an enum type as its underlying type.
    if (const EnumType *EnumTy = Ty->getAs<EnumType>())
      Ty = EnumTy->getDecl()->getIntegerType();

    // All integral types are promoted to XLen width, unless passed on the
    // stack.
    if (Size < XLen && Ty->isIntegralOrEnumerationType() && !MustUseStack) {
      return extendType(Ty);
    }

    if (const auto *EIT = Ty->getAs<ExtIntType>()) {
      if (EIT->getNumBits() < XLen && !MustUseStack)
        return extendType(Ty);
      if (EIT->getNumBits() > 128 ||
          (!getContext().getTargetInfo().hasInt128Type() &&
           EIT->getNumBits() > 64))
        return getNaturalAlignIndirect(Ty, /*ByVal=*/false);
    }

    return ABIArgInfo::getDirect();
  }

  // Aggregates which are <= 2*XLen will be passed in registers if possible,
  // so coerce to integers.
  if (Size <= 2 * XLen) {
    unsigned Alignment = getContext().getTypeAlign(Ty);

    // Use a single XLen int if possible, 2*XLen if 2*XLen alignment is
    // required, and a 2-element XLen array if only XLen alignment is required.
    if (Size <= XLen) {
      return ABIArgInfo::getDirect(
          llvm::IntegerType::get(getVMContext(), XLen));
    } else if (Alignment == 2 * XLen) {
      return ABIArgInfo::getDirect(
          llvm::IntegerType::get(getVMContext(), 2 * XLen));
    } else {
      return ABIArgInfo::getDirect(llvm::ArrayType::get(
          llvm::IntegerType::get(getVMContext(), XLen), 2));
    }
  }
  return getNaturalAlignIndirect(Ty, /*ByVal=*/false);
}

ABIArgInfo RISCVABIInfo::classifyReturnType(QualType RetTy) const {
  if (RetTy->isVoidType())
    return ABIArgInfo::getIgnore();

  int ArgGPRsLeft = 2;
  int ArgFPRsLeft = FLen ? 2 : 0;

  // The rules for return and argument types are the same, so defer to
  // classifyArgumentType.
  return classifyArgumentType(RetTy, /*IsFixed=*/true, ArgGPRsLeft,
                              ArgFPRsLeft);
}

Address RISCVABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
                                QualType Ty) const {
  CharUnits SlotSize = CharUnits::fromQuantity(XLen / 8);

  // Empty records are ignored for parameter passing purposes.
  if (isEmptyRecord(getContext(), Ty, true)) {
    Address Addr(CGF.Builder.CreateLoad(VAListAddr), SlotSize);
    Addr = CGF.Builder.CreateElementBitCast(Addr, CGF.ConvertTypeForMem(Ty));
    return Addr;
  }

  auto TInfo = getContext().getTypeInfoInChars(Ty);

  // Arguments bigger than two slots (i.e. 2*XLen bits) are passed indirectly.
  bool IsIndirect = TInfo.Width > 2 * SlotSize;

  return emitVoidPtrVAArg(CGF, VAListAddr, Ty, IsIndirect, TInfo,
                          SlotSize, /*AllowHigherAlign=*/true);
}

ABIArgInfo RISCVABIInfo::extendType(QualType Ty) const {
  int TySize = getContext().getTypeSize(Ty);
  // The RV64 ABI requires unsigned 32-bit integers to be sign extended.
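  // For example, a "uint32_t" argument on RV64 is passed sign-extended in a
  // 64-bit GPR rather than zero-extended.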
  if (XLen == 64 && Ty->isUnsignedIntegerOrEnumerationType() && TySize == 32)
    return ABIArgInfo::getSignExtend(Ty);
  return ABIArgInfo::getExtend(Ty);
}

namespace {
class RISCVTargetCodeGenInfo : public TargetCodeGenInfo {
public:
  RISCVTargetCodeGenInfo(CodeGen::CodeGenTypes &CGT, unsigned XLen,
                         unsigned FLen)
      : TargetCodeGenInfo(std::make_unique<RISCVABIInfo>(CGT, XLen, FLen)) {}

  void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
                           CodeGen::CodeGenModule &CGM) const override {
    const auto *FD = dyn_cast_or_null<FunctionDecl>(D);
    if (!FD)
      return;

    const auto *Attr = FD->getAttr<RISCVInterruptAttr>();
    if (!Attr)
      return;

    const char *Kind;
    switch (Attr->getInterrupt()) {
    case RISCVInterruptAttr::user: Kind = "user"; break;
    case RISCVInterruptAttr::supervisor: Kind = "supervisor"; break;
    case RISCVInterruptAttr::machine: Kind = "machine"; break;
    }

    auto *Fn = cast<llvm::Function>(GV);

    Fn->addFnAttr("interrupt", Kind);
  }
};
} // end anonymous namespace

//===----------------------------------------------------------------------===//
// VE ABI Implementation.
//===----------------------------------------------------------------------===//

namespace {
class VEABIInfo : public DefaultABIInfo {
public:
  VEABIInfo(CodeGenTypes &CGT) : DefaultABIInfo(CGT) {}

private:
  ABIArgInfo classifyReturnType(QualType RetTy) const;
  ABIArgInfo classifyArgumentType(QualType Ty) const;
  void computeInfo(CGFunctionInfo &FI) const override;
};
} // end anonymous namespace

ABIArgInfo VEABIInfo::classifyReturnType(QualType Ty) const {
  if (Ty->isAnyComplexType())
    return ABIArgInfo::getDirect();
  uint64_t Size = getContext().getTypeSize(Ty);
  if (Size < 64 && Ty->isIntegerType())
    return ABIArgInfo::getExtend(Ty);
  return DefaultABIInfo::classifyReturnType(Ty);
}

ABIArgInfo VEABIInfo::classifyArgumentType(QualType Ty) const {
  if (Ty->isAnyComplexType())
    return ABIArgInfo::getDirect();
  uint64_t Size = getContext().getTypeSize(Ty);
  if (Size < 64 && Ty->isIntegerType())
    return ABIArgInfo::getExtend(Ty);
  return DefaultABIInfo::classifyArgumentType(Ty);
}

void VEABIInfo::computeInfo(CGFunctionInfo &FI) const {
  FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
  for (auto &Arg : FI.arguments())
    Arg.info = classifyArgumentType(Arg.type);
}

namespace {
class VETargetCodeGenInfo : public TargetCodeGenInfo {
public:
  VETargetCodeGenInfo(CodeGenTypes &CGT)
      : TargetCodeGenInfo(std::make_unique<VEABIInfo>(CGT)) {}
  // The VE ABI requires that the arguments of variadic and prototype-less
  // functions be passed in both registers and memory.
  bool isNoProtoCallVariadic(const CallArgList &args,
                             const FunctionNoProtoType *fnType) const override {
    return true;
  }
};
} // end anonymous namespace

//===----------------------------------------------------------------------===//
// Driver code
//===----------------------------------------------------------------------===//

bool CodeGenModule::supportsCOMDAT() const {
  return getTriple().supportsCOMDAT();
}

const TargetCodeGenInfo &CodeGenModule::getTargetCodeGenInfo() {
  if (TheTargetCodeGenInfo)
    return *TheTargetCodeGenInfo;

  // Helper to set the unique_ptr while still keeping the return value.
  auto SetCGInfo = [&](TargetCodeGenInfo *P) -> const TargetCodeGenInfo & {
    this->TheTargetCodeGenInfo.reset(P);
    return *P;
  };

  const llvm::Triple &Triple = getTarget().getTriple();
  switch (Triple.getArch()) {
  default:
    return SetCGInfo(new DefaultTargetCodeGenInfo(Types));

  case llvm::Triple::le32:
    return SetCGInfo(new PNaClTargetCodeGenInfo(Types));
  case llvm::Triple::mips:
  case llvm::Triple::mipsel:
    if (Triple.getOS() == llvm::Triple::NaCl)
      return SetCGInfo(new PNaClTargetCodeGenInfo(Types));
    return SetCGInfo(new MIPSTargetCodeGenInfo(Types, true));

  case llvm::Triple::mips64:
  case llvm::Triple::mips64el:
    return SetCGInfo(new MIPSTargetCodeGenInfo(Types, false));

  case llvm::Triple::avr:
    return SetCGInfo(new AVRTargetCodeGenInfo(Types));

  case llvm::Triple::aarch64:
  case llvm::Triple::aarch64_32:
  case llvm::Triple::aarch64_be: {
    AArch64ABIInfo::ABIKind Kind = AArch64ABIInfo::AAPCS;
    if (getTarget().getABI() == "darwinpcs")
      Kind = AArch64ABIInfo::DarwinPCS;
    else if (Triple.isOSWindows())
      return SetCGInfo(
          new WindowsAArch64TargetCodeGenInfo(Types, AArch64ABIInfo::Win64));

    return SetCGInfo(new AArch64TargetCodeGenInfo(Types, Kind));
  }

  case llvm::Triple::wasm32:
  case llvm::Triple::wasm64: {
    WebAssemblyABIInfo::ABIKind Kind = WebAssemblyABIInfo::MVP;
    if (getTarget().getABI() == "experimental-mv")
      Kind = WebAssemblyABIInfo::ExperimentalMV;
    return SetCGInfo(new WebAssemblyTargetCodeGenInfo(Types, Kind));
  }

  case llvm::Triple::arm:
  case llvm::Triple::armeb:
  case llvm::Triple::thumb:
  case llvm::Triple::thumbeb: {
    if (Triple.getOS() == llvm::Triple::Win32) {
      return SetCGInfo(
          new WindowsARMTargetCodeGenInfo(Types, ARMABIInfo::AAPCS_VFP));
    }

    ARMABIInfo::ABIKind Kind = ARMABIInfo::AAPCS;
    StringRef ABIStr = getTarget().getABI();
    if (ABIStr == "apcs-gnu")
      Kind = ARMABIInfo::APCS;
    else if (ABIStr == "aapcs16")
      Kind = ARMABIInfo::AAPCS16_VFP;
    else if (CodeGenOpts.FloatABI == "hard" ||
             (CodeGenOpts.FloatABI != "soft" &&
              (Triple.getEnvironment() == llvm::Triple::GNUEABIHF ||
               Triple.getEnvironment() == llvm::Triple::MuslEABIHF ||
               Triple.getEnvironment() == llvm::Triple::EABIHF)))
      Kind = ARMABIInfo::AAPCS_VFP;

    return SetCGInfo(new ARMTargetCodeGenInfo(Types, Kind));
  }

  case llvm::Triple::ppc: {
    if (Triple.isOSAIX())
      return SetCGInfo(new AIXTargetCodeGenInfo(Types, /*Is64Bit=*/false));

    bool IsSoftFloat =
        CodeGenOpts.FloatABI == "soft" || getTarget().hasFeature("spe");
    bool RetSmallStructInRegABI =
        PPC32TargetCodeGenInfo::isStructReturnInRegABI(Triple, CodeGenOpts);
    return SetCGInfo(
        new PPC32TargetCodeGenInfo(Types, IsSoftFloat, RetSmallStructInRegABI));
  }
  case llvm::Triple::ppcle: {
    bool IsSoftFloat = CodeGenOpts.FloatABI == "soft";
    bool RetSmallStructInRegABI =
        PPC32TargetCodeGenInfo::isStructReturnInRegABI(Triple, CodeGenOpts);
    return SetCGInfo(
        new PPC32TargetCodeGenInfo(Types, IsSoftFloat, RetSmallStructInRegABI));
  }
  case llvm::Triple::ppc64:
    if (Triple.isOSAIX())
      return SetCGInfo(new AIXTargetCodeGenInfo(Types, /*Is64Bit=*/true));

    if (Triple.isOSBinFormatELF()) {
      PPC64_SVR4_ABIInfo::ABIKind Kind = PPC64_SVR4_ABIInfo::ELFv1;
      if (getTarget().getABI() == "elfv2")
        Kind = PPC64_SVR4_ABIInfo::ELFv2;
      bool IsSoftFloat = CodeGenOpts.FloatABI == "soft";

      return SetCGInfo(
          new PPC64_SVR4_TargetCodeGenInfo(Types, Kind, IsSoftFloat));
    }
    return SetCGInfo(new PPC64TargetCodeGenInfo(Types));
  case llvm::Triple::ppc64le: {
    assert(Triple.isOSBinFormatELF() && "PPC64 LE non-ELF not supported!");
    PPC64_SVR4_ABIInfo::ABIKind Kind = PPC64_SVR4_ABIInfo::ELFv2;
    if (getTarget().getABI() == "elfv1")
      Kind = PPC64_SVR4_ABIInfo::ELFv1;
    bool IsSoftFloat = CodeGenOpts.FloatABI == "soft";

    return SetCGInfo(
        new PPC64_SVR4_TargetCodeGenInfo(Types, Kind, IsSoftFloat));
  }

  case llvm::Triple::nvptx:
  case llvm::Triple::nvptx64:
    return SetCGInfo(new NVPTXTargetCodeGenInfo(Types));

  case llvm::Triple::msp430:
    return SetCGInfo(new MSP430TargetCodeGenInfo(Types));

  case llvm::Triple::riscv32:
  case llvm::Triple::riscv64: {
    StringRef ABIStr = getTarget().getABI();
    unsigned XLen = getTarget().getPointerWidth(0);
    unsigned ABIFLen = 0;
    if (ABIStr.endswith("f"))
      ABIFLen = 32;
    else if (ABIStr.endswith("d"))
      ABIFLen = 64;
    return SetCGInfo(new RISCVTargetCodeGenInfo(Types, XLen, ABIFLen));
  }

  case llvm::Triple::systemz: {
    bool SoftFloat = CodeGenOpts.FloatABI == "soft";
    bool HasVector = !SoftFloat && getTarget().getABI() == "vector";
    return SetCGInfo(new SystemZTargetCodeGenInfo(Types, HasVector, SoftFloat));
  }

  case llvm::Triple::tce:
  case llvm::Triple::tcele:
    return SetCGInfo(new TCETargetCodeGenInfo(Types));

  case llvm::Triple::x86: {
    bool IsDarwinVectorABI = Triple.isOSDarwin();
    bool RetSmallStructInRegABI =
        X86_32TargetCodeGenInfo::isStructReturnInRegABI(Triple, CodeGenOpts);
    bool IsWin32FloatStructABI = Triple.isOSWindows() && !Triple.isOSCygMing();

    if (Triple.getOS() == llvm::Triple::Win32) {
      return SetCGInfo(new WinX86_32TargetCodeGenInfo(
          Types, IsDarwinVectorABI, RetSmallStructInRegABI,
          IsWin32FloatStructABI, CodeGenOpts.NumRegisterParameters));
    } else {
      return SetCGInfo(new X86_32TargetCodeGenInfo(
          Types, IsDarwinVectorABI, RetSmallStructInRegABI,
          IsWin32FloatStructABI, CodeGenOpts.NumRegisterParameters,
          CodeGenOpts.FloatABI == "soft"));
    }
  }

  case llvm::Triple::x86_64: {
    StringRef ABI = getTarget().getABI();
    X86AVXABILevel AVXLevel = (ABI == "avx512" ? X86AVXABILevel::AVX512
                               : ABI == "avx"  ? X86AVXABILevel::AVX
                                               : X86AVXABILevel::None);

    switch (Triple.getOS()) {
    case llvm::Triple::Win32:
      return SetCGInfo(new WinX86_64TargetCodeGenInfo(Types, AVXLevel));
    default:
      return SetCGInfo(new X86_64TargetCodeGenInfo(Types, AVXLevel));
    }
  }
  case llvm::Triple::hexagon:
    return SetCGInfo(new HexagonTargetCodeGenInfo(Types));
  case llvm::Triple::lanai:
    return SetCGInfo(new LanaiTargetCodeGenInfo(Types));
  case llvm::Triple::r600:
    return SetCGInfo(new AMDGPUTargetCodeGenInfo(Types));
  case llvm::Triple::amdgcn:
    return SetCGInfo(new AMDGPUTargetCodeGenInfo(Types));
  case llvm::Triple::sparc:
    return SetCGInfo(new SparcV8TargetCodeGenInfo(Types));
  case llvm::Triple::sparcv9:
    return SetCGInfo(new SparcV9TargetCodeGenInfo(Types));
  case llvm::Triple::xcore:
    return SetCGInfo(new XCoreTargetCodeGenInfo(Types));
  case llvm::Triple::arc:
    return SetCGInfo(new ARCTargetCodeGenInfo(Types));
  case llvm::Triple::spir:
  case llvm::Triple::spir64:
    return SetCGInfo(new SPIRTargetCodeGenInfo(Types));
  case llvm::Triple::ve:
    return SetCGInfo(new VETargetCodeGenInfo(Types));
  }
}

/// Create an OpenCL kernel for an enqueued block.
///
/// The kernel has the same function type as the block invoke function. Its
/// name is the name of the block invoke function postfixed with "_kernel".
/// It simply calls the block invoke function and then returns.
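///
/// As a rough sketch (names here are illustrative, not taken from real
/// output), for an invoke function "@f_block_invoke(i8* %b)" the generated
/// wrapper is equivalent to:
///   define internal void @f_block_invoke_kernel(i8* %b) {
///     call void @f_block_invoke(i8* %b)
///     ret void
///   }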
llvm::Function *
TargetCodeGenInfo::createEnqueuedBlockKernel(CodeGenFunction &CGF,
                                             llvm::Function *Invoke,
                                             llvm::Value *BlockLiteral) const {
  auto *InvokeFT = Invoke->getFunctionType();
  llvm::SmallVector<llvm::Type *, 2> ArgTys;
  for (auto &P : InvokeFT->params())
    ArgTys.push_back(P);
  auto &C = CGF.getLLVMContext();
  std::string Name = Invoke->getName().str() + "_kernel";
  auto *FT = llvm::FunctionType::get(llvm::Type::getVoidTy(C), ArgTys, false);
  auto *F = llvm::Function::Create(FT, llvm::GlobalValue::InternalLinkage, Name,
                                   &CGF.CGM.getModule());
  auto IP = CGF.Builder.saveIP();
  auto *BB = llvm::BasicBlock::Create(C, "entry", F);
  auto &Builder = CGF.Builder;
  Builder.SetInsertPoint(BB);
  llvm::SmallVector<llvm::Value *, 2> Args;
  for (auto &A : F->args())
    Args.push_back(&A);
  llvm::CallInst *call = Builder.CreateCall(Invoke, Args);
  call->setCallingConv(Invoke->getCallingConv());
  Builder.CreateRetVoid();
  Builder.restoreIP(IP);
  return F;
}

/// Create an OpenCL kernel for an enqueued block.
///
/// The type of the first argument (the block literal) is the struct type
/// of the block literal instead of a pointer type. The first argument
/// (block literal) is passed directly by value to the kernel. The kernel
/// allocates the same struct type on the stack, stores the block literal
/// into it, and passes its pointer to the block invoke function. The kernel
/// has the "enqueued-block" function attribute and kernel argument metadata.
llvm::Function *AMDGPUTargetCodeGenInfo::createEnqueuedBlockKernel(
    CodeGenFunction &CGF, llvm::Function *Invoke,
    llvm::Value *BlockLiteral) const {
  auto &Builder = CGF.Builder;
  auto &C = CGF.getLLVMContext();

  auto *BlockTy = BlockLiteral->getType()->getPointerElementType();
  auto *InvokeFT = Invoke->getFunctionType();
  llvm::SmallVector<llvm::Type *, 2> ArgTys;
  llvm::SmallVector<llvm::Metadata *, 8> AddressQuals;
  llvm::SmallVector<llvm::Metadata *, 8> AccessQuals;
  llvm::SmallVector<llvm::Metadata *, 8> ArgTypeNames;
  llvm::SmallVector<llvm::Metadata *, 8> ArgBaseTypeNames;
  llvm::SmallVector<llvm::Metadata *, 8> ArgTypeQuals;
  llvm::SmallVector<llvm::Metadata *, 8> ArgNames;

  ArgTys.push_back(BlockTy);
  ArgTypeNames.push_back(llvm::MDString::get(C, "__block_literal"));
  AddressQuals.push_back(llvm::ConstantAsMetadata::get(Builder.getInt32(0)));
  ArgBaseTypeNames.push_back(llvm::MDString::get(C, "__block_literal"));
  ArgTypeQuals.push_back(llvm::MDString::get(C, ""));
  AccessQuals.push_back(llvm::MDString::get(C, "none"));
  ArgNames.push_back(llvm::MDString::get(C, "block_literal"));
  for (unsigned I = 1, E = InvokeFT->getNumParams(); I < E; ++I) {
    ArgTys.push_back(InvokeFT->getParamType(I));
    ArgTypeNames.push_back(llvm::MDString::get(C, "void*"));
    AddressQuals.push_back(llvm::ConstantAsMetadata::get(Builder.getInt32(3)));
    AccessQuals.push_back(llvm::MDString::get(C, "none"));
    ArgBaseTypeNames.push_back(llvm::MDString::get(C, "void*"));
    ArgTypeQuals.push_back(llvm::MDString::get(C, ""));
    ArgNames.push_back(
        llvm::MDString::get(C, (Twine("local_arg") + Twine(I)).str()));
  }
  std::string Name = Invoke->getName().str() + "_kernel";
  auto *FT = llvm::FunctionType::get(llvm::Type::getVoidTy(C), ArgTys, false);
  auto *F = llvm::Function::Create(FT, llvm::GlobalValue::InternalLinkage, Name,
                                   &CGF.CGM.getModule());
  F->addFnAttr("enqueued-block");
  auto IP = CGF.Builder.saveIP();
  auto *BB = llvm::BasicBlock::Create(C, "entry", F);
  Builder.SetInsertPoint(BB);
  const auto BlockAlign = CGF.CGM.getDataLayout().getPrefTypeAlign(BlockTy);
  auto *BlockPtr = Builder.CreateAlloca(BlockTy, nullptr);
  BlockPtr->setAlignment(BlockAlign);
  Builder.CreateAlignedStore(F->arg_begin(), BlockPtr, BlockAlign);
  auto *Cast = Builder.CreatePointerCast(BlockPtr, InvokeFT->getParamType(0));
  llvm::SmallVector<llvm::Value *, 2> Args;
  Args.push_back(Cast);
  for (auto I = F->arg_begin() + 1, E = F->arg_end(); I != E; ++I)
    Args.push_back(I);
  llvm::CallInst *call = Builder.CreateCall(Invoke, Args);
  call->setCallingConv(Invoke->getCallingConv());
  Builder.CreateRetVoid();
  Builder.restoreIP(IP);

  F->setMetadata("kernel_arg_addr_space", llvm::MDNode::get(C, AddressQuals));
  F->setMetadata("kernel_arg_access_qual", llvm::MDNode::get(C, AccessQuals));
  F->setMetadata("kernel_arg_type", llvm::MDNode::get(C, ArgTypeNames));
  F->setMetadata("kernel_arg_base_type",
                 llvm::MDNode::get(C, ArgBaseTypeNames));
  F->setMetadata("kernel_arg_type_qual", llvm::MDNode::get(C, ArgTypeQuals));
  if (CGF.CGM.getCodeGenOpts().EmitOpenCLArgMetadata)
    F->setMetadata("kernel_arg_name", llvm::MDNode::get(C, ArgNames));

  return F;
}