//===- X86.cpp ------------------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "ABIInfoImpl.h"
#include "TargetInfo.h"
#include "clang/Basic/DiagnosticFrontend.h"
#include "llvm/ADT/SmallBitVector.h"

using namespace clang;
using namespace clang::CodeGen;

namespace {

/// IsX86_MMXType - Return true if this is an MMX type.
bool IsX86_MMXType(llvm::Type *IRType) {
  // Return true if the type is an MMX type <2 x i32>, <4 x i16>, or <8 x i8>.
  return IRType->isVectorTy() && IRType->getPrimitiveSizeInBits() == 64 &&
         cast<llvm::VectorType>(IRType)->getElementType()->isIntegerTy() &&
         IRType->getScalarSizeInBits() != 64;
}
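// For example, <4 x i16> passes all three checks (64 bits total, integer
// elements, element width != 64), while <1 x i64> is rejected by the final
// check and <2 x float> by the integer-element check.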

static llvm::Type *X86AdjustInlineAsmType(CodeGen::CodeGenFunction &CGF,
                                          StringRef Constraint,
                                          llvm::Type *Ty) {
  bool IsMMXCons = llvm::StringSwitch<bool>(Constraint)
                     .Cases("y", "&y", "^Ym", true)
                     .Default(false);
  if (IsMMXCons && Ty->isVectorTy()) {
    if (cast<llvm::VectorType>(Ty)->getPrimitiveSizeInBits().getFixedValue() !=
        64) {
      // Invalid MMX constraint
      return nullptr;
    }

    return llvm::Type::getX86_MMXTy(CGF.getLLVMContext());
  }

  if (Constraint == "k") {
    llvm::Type *Int1Ty = llvm::Type::getInt1Ty(CGF.getLLVMContext());
    return llvm::FixedVectorType::get(Int1Ty, Ty->getScalarSizeInBits());
  }

  // No operation needed
  return Ty;
}
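// Note that for the "k" (AVX-512 mask) constraint above, a 32-bit scalar
// operand is rewritten to <32 x i1>, i.e. a mask vector whose width matches
// the operand's scalar size in bits.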

/// Returns true if this type can be passed in SSE registers with the
/// X86_VectorCall calling convention. Shared between x86_32 and x86_64.
static bool isX86VectorTypeForVectorCall(ASTContext &Context, QualType Ty) {
  if (const BuiltinType *BT = Ty->getAs<BuiltinType>()) {
    if (BT->isFloatingPoint() && BT->getKind() != BuiltinType::Half) {
      if (BT->getKind() == BuiltinType::LongDouble) {
        if (&Context.getTargetInfo().getLongDoubleFormat() ==
            &llvm::APFloat::x87DoubleExtended())
          return false;
      }
      return true;
    }
  } else if (const VectorType *VT = Ty->getAs<VectorType>()) {
    // vectorcall can pass XMM, YMM, and ZMM vectors. We don't pass SSE1 MMX
    // registers specially.
    unsigned VecSize = Context.getTypeSize(VT);
    if (VecSize == 128 || VecSize == 256 || VecSize == 512)
      return true;
  }
  return false;
}

/// Returns true if this aggregate is small enough to be passed in SSE
/// registers in the X86_VectorCall calling convention. Shared between x86_32
/// and x86_64.
static bool isX86VectorCallAggregateSmallEnough(uint64_t NumMembers) {
  return NumMembers <= 4;
}

/// Returns a Homogeneous Vector Aggregate ABIArgInfo, used in X86.
static ABIArgInfo getDirectX86Hva(llvm::Type *T = nullptr) {
  auto AI = ABIArgInfo::getDirect(T);
  AI.setInReg(true);
  AI.setCanBeFlattened(false);
  return AI;
}
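// An HVA built this way stays a single aggregate: setCanBeFlattened(false)
// keeps call lowering from splitting it into its elements, and setInReg(true)
// requests register rather than stack placement, matching the vectorcall
// handling of HVAs below.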

//===----------------------------------------------------------------------===//
// X86-32 ABI Implementation
//===----------------------------------------------------------------------===//

/// Similar to llvm::CCState, but for Clang.
struct CCState {
  CCState(CGFunctionInfo &FI)
      : IsPreassigned(FI.arg_size()), CC(FI.getCallingConvention()),
        Required(FI.getRequiredArgs()), IsDelegateCall(FI.isDelegateCall()) {}

  llvm::SmallBitVector IsPreassigned;
  unsigned CC = CallingConv::CC_C;
  unsigned FreeRegs = 0;
  unsigned FreeSSERegs = 0;
  RequiredArgs Required;
  bool IsDelegateCall = false;
};

/// X86_32ABIInfo - The X86-32 ABI information.
class X86_32ABIInfo : public ABIInfo {
  enum Class {
    Integer,
    Float
  };

  static const unsigned MinABIStackAlignInBytes = 4;

  bool IsDarwinVectorABI;
  bool IsRetSmallStructInRegABI;
  bool IsWin32StructABI;
  bool IsSoftFloatABI;
  bool IsMCUABI;
  bool IsLinuxABI;
  unsigned DefaultNumRegisterParameters;

  static bool isRegisterSize(unsigned Size) {
    return (Size == 8 || Size == 16 || Size == 32 || Size == 64);
  }

  bool isHomogeneousAggregateBaseType(QualType Ty) const override {
    // FIXME: Assumes vectorcall is in use.
    return isX86VectorTypeForVectorCall(getContext(), Ty);
  }

  bool isHomogeneousAggregateSmallEnough(const Type *Ty,
                                         uint64_t NumMembers) const override {
    // FIXME: Assumes vectorcall is in use.
    return isX86VectorCallAggregateSmallEnough(NumMembers);
  }

  bool shouldReturnTypeInRegister(QualType Ty, ASTContext &Context) const;

  /// getIndirectResult - Given a source type \arg Ty, return a suitable result
  /// such that the argument will be passed in memory.
  ABIArgInfo getIndirectResult(QualType Ty, bool ByVal, CCState &State) const;

  ABIArgInfo getIndirectReturnResult(QualType Ty, CCState &State) const;

  /// Return the alignment to use for the given type on the stack.
  unsigned getTypeStackAlignInBytes(QualType Ty, unsigned Align) const;

  Class classify(QualType Ty) const;
  ABIArgInfo classifyReturnType(QualType RetTy, CCState &State) const;
  ABIArgInfo classifyArgumentType(QualType RetTy, CCState &State,
                                  unsigned ArgIndex) const;

  /// Updates the number of available free registers, returns
  /// true if any registers were allocated.
  bool updateFreeRegs(QualType Ty, CCState &State) const;

  bool shouldAggregateUseDirect(QualType Ty, CCState &State, bool &InReg,
                                bool &NeedsPadding) const;
  bool shouldPrimitiveUseInReg(QualType Ty, CCState &State) const;

  bool canExpandIndirectArgument(QualType Ty) const;

  /// Rewrite the function info so that all memory arguments use
  /// inalloca.
  void rewriteWithInAlloca(CGFunctionInfo &FI) const;

  void addFieldToArgStruct(SmallVector<llvm::Type *, 6> &FrameFields,
                           CharUnits &StackOffset, ABIArgInfo &Info,
                           QualType Type) const;
  void runVectorCallFirstPass(CGFunctionInfo &FI, CCState &State) const;

public:

  void computeInfo(CGFunctionInfo &FI) const override;
  Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
                    QualType Ty) const override;

  X86_32ABIInfo(CodeGen::CodeGenTypes &CGT, bool DarwinVectorABI,
                bool RetSmallStructInRegABI, bool Win32StructABI,
                unsigned NumRegisterParameters, bool SoftFloatABI)
      : ABIInfo(CGT), IsDarwinVectorABI(DarwinVectorABI),
        IsRetSmallStructInRegABI(RetSmallStructInRegABI),
        IsWin32StructABI(Win32StructABI), IsSoftFloatABI(SoftFloatABI),
        IsMCUABI(CGT.getTarget().getTriple().isOSIAMCU()),
        IsLinuxABI(CGT.getTarget().getTriple().isOSLinux() ||
                   CGT.getTarget().getTriple().isOSCygMing()),
        DefaultNumRegisterParameters(NumRegisterParameters) {}
};

class X86_32SwiftABIInfo : public SwiftABIInfo {
public:
  explicit X86_32SwiftABIInfo(CodeGenTypes &CGT)
      : SwiftABIInfo(CGT, /*SwiftErrorInRegister=*/false) {}

  bool shouldPassIndirectly(ArrayRef<llvm::Type *> ComponentTys,
                            bool AsReturnValue) const override {
    // LLVM's x86-32 lowering currently only assigns up to three
    // integer registers and three fp registers. Oddly, it'll use up to
    // four vector registers for vectors, but those can overlap with the
    // scalar registers.
    return occupiesMoreThan(ComponentTys, /*total=*/3);
  }
};

class X86_32TargetCodeGenInfo : public TargetCodeGenInfo {
public:
  X86_32TargetCodeGenInfo(CodeGen::CodeGenTypes &CGT, bool DarwinVectorABI,
                          bool RetSmallStructInRegABI, bool Win32StructABI,
                          unsigned NumRegisterParameters, bool SoftFloatABI)
      : TargetCodeGenInfo(std::make_unique<X86_32ABIInfo>(
            CGT, DarwinVectorABI, RetSmallStructInRegABI, Win32StructABI,
            NumRegisterParameters, SoftFloatABI)) {
    SwiftInfo = std::make_unique<X86_32SwiftABIInfo>(CGT);
  }

  static bool isStructReturnInRegABI(
      const llvm::Triple &Triple, const CodeGenOptions &Opts);

  void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
                           CodeGen::CodeGenModule &CGM) const override;

  int getDwarfEHStackPointer(CodeGen::CodeGenModule &CGM) const override {
    // Darwin uses different dwarf register numbers for EH.
    if (CGM.getTarget().getTriple().isOSDarwin()) return 5;
    return 4;
  }

  bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
                               llvm::Value *Address) const override;

  llvm::Type *adjustInlineAsmType(CodeGen::CodeGenFunction &CGF,
                                  StringRef Constraint,
                                  llvm::Type *Ty) const override {
    return X86AdjustInlineAsmType(CGF, Constraint, Ty);
  }

  void addReturnRegisterOutputs(CodeGenFunction &CGF, LValue ReturnValue,
                                std::string &Constraints,
                                std::vector<llvm::Type *> &ResultRegTypes,
                                std::vector<llvm::Type *> &ResultTruncRegTypes,
                                std::vector<LValue> &ResultRegDests,
                                std::string &AsmString,
                                unsigned NumOutputs) const override;

  StringRef getARCRetainAutoreleasedReturnValueMarker() const override {
    return "movl\t%ebp, %ebp"
           "\t\t// marker for objc_retainAutoreleaseReturnValue";
  }
};

} // namespace

/// Rewrite input constraint references after adding some output constraints.
/// In the case where there is one output and one input and we add one output,
/// we need to replace all operand references greater than or equal to 1:
///   mov $0, $1
///   mov eax, $1
/// The result will be:
///   mov $0, $2
///   mov eax, $2
static void rewriteInputConstraintReferences(unsigned FirstIn,
                                             unsigned NumNewOuts,
                                             std::string &AsmString) {
  std::string Buf;
  llvm::raw_string_ostream OS(Buf);
  size_t Pos = 0;
  while (Pos < AsmString.size()) {
    size_t DollarStart = AsmString.find('$', Pos);
    if (DollarStart == std::string::npos)
      DollarStart = AsmString.size();
    size_t DollarEnd = AsmString.find_first_not_of('$', DollarStart);
    if (DollarEnd == std::string::npos)
      DollarEnd = AsmString.size();
    OS << StringRef(&AsmString[Pos], DollarEnd - Pos);
    Pos = DollarEnd;
    size_t NumDollars = DollarEnd - DollarStart;
    if (NumDollars % 2 != 0 && Pos < AsmString.size()) {
      // We have an operand reference.
      size_t DigitStart = Pos;
      if (AsmString[DigitStart] == '{') {
        OS << '{';
        ++DigitStart;
      }
      size_t DigitEnd = AsmString.find_first_not_of("0123456789", DigitStart);
      if (DigitEnd == std::string::npos)
        DigitEnd = AsmString.size();
      StringRef OperandStr(&AsmString[DigitStart], DigitEnd - DigitStart);
      unsigned OperandIndex;
      if (!OperandStr.getAsInteger(10, OperandIndex)) {
        if (OperandIndex >= FirstIn)
          OperandIndex += NumNewOuts;
        OS << OperandIndex;
      } else {
        OS << OperandStr;
      }
      Pos = DigitEnd;
    }
  }
  AsmString = std::move(OS.str());
}
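// For illustration: with FirstIn = 1 and NumNewOuts = 1, "mov $0, $1" becomes
// "mov $0, $2", while an escaped "$$1" is left alone because an even run of
// dollar signs is not an operand reference.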

/// Add output constraints for EAX:EDX because they are return registers.
void X86_32TargetCodeGenInfo::addReturnRegisterOutputs(
    CodeGenFunction &CGF, LValue ReturnSlot, std::string &Constraints,
    std::vector<llvm::Type *> &ResultRegTypes,
    std::vector<llvm::Type *> &ResultTruncRegTypes,
    std::vector<LValue> &ResultRegDests, std::string &AsmString,
    unsigned NumOutputs) const {
  uint64_t RetWidth = CGF.getContext().getTypeSize(ReturnSlot.getType());

  // Use the EAX constraint if the width is 32 or smaller and EAX:EDX if it is
  // larger.
  if (!Constraints.empty())
    Constraints += ',';
  if (RetWidth <= 32) {
    Constraints += "={eax}";
    ResultRegTypes.push_back(CGF.Int32Ty);
  } else {
    // Use the 'A' constraint for EAX:EDX.
    Constraints += "=A";
    ResultRegTypes.push_back(CGF.Int64Ty);
  }

  // Truncate EAX or EAX:EDX to an integer of the appropriate size.
  llvm::Type *CoerceTy = llvm::IntegerType::get(CGF.getLLVMContext(), RetWidth);
  ResultTruncRegTypes.push_back(CoerceTy);

  // Coerce the integer by bitcasting the return slot pointer.
  ReturnSlot.setAddress(ReturnSlot.getAddress(CGF).withElementType(CoerceTy));
  ResultRegDests.push_back(ReturnSlot);

  rewriteInputConstraintReferences(NumOutputs, 1, AsmString);
}
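// As a concrete case, a 48-bit (6-byte) struct return takes the "=A" path:
// the asm result register type is i64 and the truncation type is i48, which
// is then stored through the bitcast return slot.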

/// shouldReturnTypeInRegister - Determine if the given type should be
/// returned in a register (for the Darwin and MCU ABI).
bool X86_32ABIInfo::shouldReturnTypeInRegister(QualType Ty,
                                               ASTContext &Context) const {
  uint64_t Size = Context.getTypeSize(Ty);

  // For i386, the type must be register sized.
  // For the MCU ABI, it only needs to be <= 8 bytes.
  if ((IsMCUABI && Size > 64) || (!IsMCUABI && !isRegisterSize(Size)))
    return false;

  if (Ty->isVectorType()) {
    // 64- and 128-bit vectors inside structures are not returned in
    // registers.
    if (Size == 64 || Size == 128)
      return false;

    return true;
  }

  // If this is a builtin, pointer, enum, complex type, member pointer, or
  // member function pointer it is ok.
  if (Ty->getAs<BuiltinType>() || Ty->hasPointerRepresentation() ||
      Ty->isAnyComplexType() || Ty->isEnumeralType() ||
      Ty->isBlockPointerType() || Ty->isMemberPointerType())
    return true;

  // Arrays are treated like records.
  if (const ConstantArrayType *AT = Context.getAsConstantArrayType(Ty))
    return shouldReturnTypeInRegister(AT->getElementType(), Context);

  // Otherwise, it must be a record type.
  const RecordType *RT = Ty->getAs<RecordType>();
  if (!RT) return false;

  // FIXME: Traverse bases here too.

  // Structure types are passed in register if all fields would be
  // passed in a register.
  for (const auto *FD : RT->getDecl()->fields()) {
    // Empty fields are ignored.
    if (isEmptyField(Context, FD, true))
      continue;

    // Check fields recursively.
    if (!shouldReturnTypeInRegister(FD->getType(), Context))
      return false;
  }
  return true;
}

static bool is32Or64BitBasicType(QualType Ty, ASTContext &Context) {
  // Treat complex types as the element type.
  if (const ComplexType *CTy = Ty->getAs<ComplexType>())
    Ty = CTy->getElementType();

  // Check for a type which we know has a simple scalar argument-passing
  // convention without any padding. (We're specifically looking for 32
  // and 64-bit integer and integer-equivalents, float, and double.)
  if (!Ty->getAs<BuiltinType>() && !Ty->hasPointerRepresentation() &&
      !Ty->isEnumeralType() && !Ty->isBlockPointerType())
    return false;

  uint64_t Size = Context.getTypeSize(Ty);
  return Size == 32 || Size == 64;
}
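// For example, 'double' and '_Complex float' (treated as its 'float' element)
// both qualify, while 'short' (16 bits) and x87 'long double' (more than 64
// bits) do not.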

static bool addFieldSizes(ASTContext &Context, const RecordDecl *RD,
                          uint64_t &Size) {
  for (const auto *FD : RD->fields()) {
    // Scalar arguments on the stack get 4 byte alignment on x86. If the
    // argument is smaller than 32 bits, expanding the struct will create
    // alignment padding.
    if (!is32Or64BitBasicType(FD->getType(), Context))
      return false;

    // FIXME: Reject bit-fields wholesale; there are two problems: we don't
    // know how to expand them yet, and the predicate for telling if a
    // bit-field still counts as "basic" is more complicated than what we were
    // doing previously.
    if (FD->isBitField())
      return false;

    Size += Context.getTypeSize(FD->getType());
  }
  return true;
}

static bool addBaseAndFieldSizes(ASTContext &Context, const CXXRecordDecl *RD,
                                 uint64_t &Size) {
  // Don't do this if there are any non-empty bases.
  for (const CXXBaseSpecifier &Base : RD->bases()) {
    if (!addBaseAndFieldSizes(Context, Base.getType()->getAsCXXRecordDecl(),
                              Size))
      return false;
  }
  if (!addFieldSizes(Context, RD, Size))
    return false;
  return true;
}

/// Test whether an argument type which is to be passed indirectly (on the
/// stack) would have the equivalent layout if it was expanded into separate
/// arguments. If so, we prefer to do the latter to avoid inhibiting
/// optimizations.
bool X86_32ABIInfo::canExpandIndirectArgument(QualType Ty) const {
  // We can only expand structure types.
  const RecordType *RT = Ty->getAs<RecordType>();
  if (!RT)
    return false;
  const RecordDecl *RD = RT->getDecl();
  uint64_t Size = 0;
  if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD)) {
    if (!IsWin32StructABI) {
      // On non-Windows, we have to conservatively match our old bitcode
      // prototypes in order to be ABI-compatible at the bitcode level.
      if (!CXXRD->isCLike())
        return false;
    } else {
      // Don't do this for dynamic classes.
      if (CXXRD->isDynamicClass())
        return false;
    }
    if (!addBaseAndFieldSizes(getContext(), CXXRD, Size))
      return false;
  } else {
    if (!addFieldSizes(getContext(), RD, Size))
      return false;
  }

  // We can do this if there was no alignment padding.
  return Size == getContext().getTypeSize(Ty);
}
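// For instance, 'struct { int a; double b; }' (96 bits of fields and a 96-bit
// type size on x86-32) can be expanded, but 'struct { short s; int i; }'
// cannot: the 'short' field fails is32Or64BitBasicType, so expanding it would
// not reproduce the in-memory layout.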

ABIArgInfo X86_32ABIInfo::getIndirectReturnResult(QualType RetTy,
                                                  CCState &State) const {
  // If the return value is indirect, then the hidden argument is consuming one
  // integer register.
  if (State.FreeRegs) {
    --State.FreeRegs;
    if (!IsMCUABI)
      return getNaturalAlignIndirectInReg(RetTy);
  }
  return getNaturalAlignIndirect(RetTy, /*ByVal=*/false);
}

ABIArgInfo X86_32ABIInfo::classifyReturnType(QualType RetTy,
                                             CCState &State) const {
  if (RetTy->isVoidType())
    return ABIArgInfo::getIgnore();

  const Type *Base = nullptr;
  uint64_t NumElts = 0;
  if ((State.CC == llvm::CallingConv::X86_VectorCall ||
       State.CC == llvm::CallingConv::X86_RegCall) &&
      isHomogeneousAggregate(RetTy, Base, NumElts)) {
    // The LLVM struct type for such an aggregate should lower properly.
    return ABIArgInfo::getDirect();
  }

  if (const VectorType *VT = RetTy->getAs<VectorType>()) {
    // On Darwin, some vectors are returned in registers.
    if (IsDarwinVectorABI) {
      uint64_t Size = getContext().getTypeSize(RetTy);

      // 128-bit vectors are a special case; they are returned in
      // registers and we need to make sure to pick a type the LLVM
      // backend will like.
      if (Size == 128)
        return ABIArgInfo::getDirect(llvm::FixedVectorType::get(
            llvm::Type::getInt64Ty(getVMContext()), 2));

      // Always return in register if it fits in a general purpose
      // register, or if it is 64 bits and has a single element.
      if ((Size == 8 || Size == 16 || Size == 32) ||
          (Size == 64 && VT->getNumElements() == 1))
        return ABIArgInfo::getDirect(
            llvm::IntegerType::get(getVMContext(), Size));

      return getIndirectReturnResult(RetTy, State);
    }

    return ABIArgInfo::getDirect();
  }

  if (isAggregateTypeForABI(RetTy)) {
    if (const RecordType *RT = RetTy->getAs<RecordType>()) {
      // Structures with flexible arrays are always indirect.
      if (RT->getDecl()->hasFlexibleArrayMember())
        return getIndirectReturnResult(RetTy, State);
    }

    // If specified, structs and unions are always indirect.
    if (!IsRetSmallStructInRegABI && !RetTy->isAnyComplexType())
      return getIndirectReturnResult(RetTy, State);

    // Ignore empty structs/unions.
    if (isEmptyRecord(getContext(), RetTy, true))
      return ABIArgInfo::getIgnore();

    // Return complex of _Float16 as <2 x half> so the backend will use xmm0.
    if (const ComplexType *CT = RetTy->getAs<ComplexType>()) {
      QualType ET = getContext().getCanonicalType(CT->getElementType());
      if (ET->isFloat16Type())
        return ABIArgInfo::getDirect(llvm::FixedVectorType::get(
            llvm::Type::getHalfTy(getVMContext()), 2));
    }

    // Small structures which are register sized are generally returned
    // in a register.
    if (shouldReturnTypeInRegister(RetTy, getContext())) {
      uint64_t Size = getContext().getTypeSize(RetTy);

      // As a special-case, if the struct is a "single-element" struct, and
      // the field is of type "float" or "double", return it in a
      // floating-point register. (MSVC does not apply this special case.)
      // We apply a similar transformation for pointer types to improve the
      // quality of the generated IR.
      if (const Type *SeltTy = isSingleElementStruct(RetTy, getContext()))
        if ((!IsWin32StructABI && SeltTy->isRealFloatingType()) ||
            SeltTy->hasPointerRepresentation())
          return ABIArgInfo::getDirect(CGT.ConvertType(QualType(SeltTy, 0)));

      // FIXME: We should be able to narrow this integer in cases with dead
      // padding.
      return ABIArgInfo::getDirect(
          llvm::IntegerType::get(getVMContext(), Size));
    }

    return getIndirectReturnResult(RetTy, State);
  }

  // Treat an enum type as its underlying type.
  if (const EnumType *EnumTy = RetTy->getAs<EnumType>())
    RetTy = EnumTy->getDecl()->getIntegerType();

  if (const auto *EIT = RetTy->getAs<BitIntType>())
    if (EIT->getNumBits() > 64)
      return getIndirectReturnResult(RetTy, State);

  return (isPromotableIntegerTypeForABI(RetTy) ? ABIArgInfo::getExtend(RetTy)
                                               : ABIArgInfo::getDirect());
}

unsigned X86_32ABIInfo::getTypeStackAlignInBytes(QualType Ty,
                                                 unsigned Align) const {
  // If the alignment is less than or equal to the minimum ABI alignment,
  // just use the default; the backend will handle this.
  if (Align <= MinABIStackAlignInBytes)
    return 0; // Use default alignment.

  if (IsLinuxABI) {
    // Exclude other System V OSes (e.g. Darwin, PS4 and FreeBSD) since we
    // don't want to spend any effort dealing with the ramifications of ABI
    // breaks.
    //
    // If the vector type is __m128/__m256/__m512, return the default
    // alignment.
    if (Ty->isVectorType() && (Align == 16 || Align == 32 || Align == 64))
      return Align;
  }
  // On non-Darwin, the stack type alignment is always 4.
  if (!IsDarwinVectorABI) {
    // Set explicit alignment, since we may need to realign the top.
    return MinABIStackAlignInBytes;
  }

  // Otherwise, if the type contains an SSE vector type, the alignment is 16.
  if (Align >= 16 && (isSIMDVectorType(getContext(), Ty) ||
                      isRecordWithSIMDVectorType(getContext(), Ty)))
    return 16;

  return MinABIStackAlignInBytes;
}
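// Example: on Linux, a 16-byte-aligned __m128 argument keeps its 16-byte
// stack alignment, while on other non-Darwin targets the same argument is
// realigned to the 4-byte minimum.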

ABIArgInfo X86_32ABIInfo::getIndirectResult(QualType Ty, bool ByVal,
                                            CCState &State) const {
  if (!ByVal) {
    if (State.FreeRegs) {
      --State.FreeRegs; // Non-byval indirects just use one pointer.
      if (!IsMCUABI)
        return getNaturalAlignIndirectInReg(Ty);
    }
    return getNaturalAlignIndirect(Ty, false);
  }

  // Compute the byval alignment.
  unsigned TypeAlign = getContext().getTypeAlign(Ty) / 8;
  unsigned StackAlign = getTypeStackAlignInBytes(Ty, TypeAlign);
  if (StackAlign == 0)
    return ABIArgInfo::getIndirect(CharUnits::fromQuantity(4), /*ByVal=*/true);

  // If the stack alignment is less than the type alignment, realign the
  // argument.
  bool Realign = TypeAlign > StackAlign;
  return ABIArgInfo::getIndirect(CharUnits::fromQuantity(StackAlign),
                                 /*ByVal=*/true, Realign);
}

X86_32ABIInfo::Class X86_32ABIInfo::classify(QualType Ty) const {
  const Type *T = isSingleElementStruct(Ty, getContext());
  if (!T)
    T = Ty.getTypePtr();

  if (const BuiltinType *BT = T->getAs<BuiltinType>()) {
    BuiltinType::Kind K = BT->getKind();
    if (K == BuiltinType::Float || K == BuiltinType::Double)
      return Float;
  }
  return Integer;
}
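// Because classify looks through single-element structs, a wrapper such as
// 'struct { double d; }' is classified Float just like a bare 'double';
// everything else (including 'long double') falls through to Integer.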

bool X86_32ABIInfo::updateFreeRegs(QualType Ty, CCState &State) const {
  if (!IsSoftFloatABI) {
    Class C = classify(Ty);
    if (C == Float)
      return false;
  }

  unsigned Size = getContext().getTypeSize(Ty);
  unsigned SizeInRegs = (Size + 31) / 32;

  if (SizeInRegs == 0)
    return false;

  if (!IsMCUABI) {
    if (SizeInRegs > State.FreeRegs) {
      State.FreeRegs = 0;
      return false;
    }
  } else {
    // The MCU psABI allows passing parameters in-reg even if there are
    // earlier parameters that are passed on the stack. Also,
    // it does not allow passing >8-byte structs in-register,
    // even if there are 3 free registers available.
    if (SizeInRegs > State.FreeRegs || SizeInRegs > 2)
      return false;
  }

  State.FreeRegs -= SizeInRegs;
  return true;
}
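// Worked example: with regparm(3) there are 3 free registers; a 64-bit
// argument needs ceil(64/32) = 2 of them, leaving one. On non-MCU targets, a
// request that does not fit zeroes FreeRegs, so later arguments go on the
// stack as well.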

bool X86_32ABIInfo::shouldAggregateUseDirect(QualType Ty, CCState &State,
                                             bool &InReg,
                                             bool &NeedsPadding) const {
  // On Windows, aggregates other than HFAs are never passed in registers, and
  // they do not consume register slots. Homogeneous floating-point aggregates
  // (HFAs) have already been dealt with at this point.
  if (IsWin32StructABI && isAggregateTypeForABI(Ty))
    return false;

  NeedsPadding = false;
  InReg = !IsMCUABI;

  if (!updateFreeRegs(Ty, State))
    return false;

  if (IsMCUABI)
    return true;

  if (State.CC == llvm::CallingConv::X86_FastCall ||
      State.CC == llvm::CallingConv::X86_VectorCall ||
      State.CC == llvm::CallingConv::X86_RegCall) {
    if (getContext().getTypeSize(Ty) <= 32 && State.FreeRegs)
      NeedsPadding = true;

    return false;
  }

  return true;
}

bool X86_32ABIInfo::shouldPrimitiveUseInReg(QualType Ty, CCState &State) const {
  bool IsPtrOrInt = (getContext().getTypeSize(Ty) <= 32) &&
                    (Ty->isIntegralOrEnumerationType() || Ty->isPointerType() ||
                     Ty->isReferenceType());

  if (!IsPtrOrInt && (State.CC == llvm::CallingConv::X86_FastCall ||
                      State.CC == llvm::CallingConv::X86_VectorCall))
    return false;

  if (!updateFreeRegs(Ty, State))
    return false;

  if (!IsPtrOrInt && State.CC == llvm::CallingConv::X86_RegCall)
    return false;

  // Return true to apply inreg to all legal parameters except for MCU targets.
  return !IsMCUABI;
}

void X86_32ABIInfo::runVectorCallFirstPass(CGFunctionInfo &FI,
                                           CCState &State) const {
  // Vectorcall on x86 works subtly differently than on x64, so the format is
  // a bit different from the x64 version. First, all vector types (not HVAs)
  // are assigned, with the first 6 ending up in the [XYZ]MM0-5 registers.
  // This differs from the x64 implementation, where the first six arguments
  // by index get registers.
  // In the second pass over the arguments, HVAs are passed in the remaining
  // vector registers if possible, or indirectly by address. The address will
  // be passed in ECX/EDX if available. Any other arguments are passed
  // according to the usual fastcall rules.
  MutableArrayRef<CGFunctionInfoArgInfo> Args = FI.arguments();
  for (int I = 0, E = Args.size(); I < E; ++I) {
    const Type *Base = nullptr;
    uint64_t NumElts = 0;
    const QualType &Ty = Args[I].type;
    if ((Ty->isVectorType() || Ty->isBuiltinType()) &&
        isHomogeneousAggregate(Ty, Base, NumElts)) {
      if (State.FreeSSERegs >= NumElts) {
        State.FreeSSERegs -= NumElts;
        Args[I].info = ABIArgInfo::getDirectInReg();
        State.IsPreassigned.set(I);
      }
    }
  }
}
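// Note the first pass only preassigns plain vector and scalar FP arguments
// (NumElts == 1 for a scalar); record-type HVAs are left for the second pass
// in classifyArgumentType, where they take whatever SSE registers remain.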

ABIArgInfo X86_32ABIInfo::classifyArgumentType(QualType Ty, CCState &State,
                                               unsigned ArgIndex) const {
  // FIXME: Set alignment on indirect arguments.
  bool IsFastCall = State.CC == llvm::CallingConv::X86_FastCall;
  bool IsRegCall = State.CC == llvm::CallingConv::X86_RegCall;
  bool IsVectorCall = State.CC == llvm::CallingConv::X86_VectorCall;

  Ty = useFirstFieldIfTransparentUnion(Ty);
  TypeInfo TI = getContext().getTypeInfo(Ty);

  // Check with the C++ ABI first.
  const RecordType *RT = Ty->getAs<RecordType>();
  if (RT) {
    CGCXXABI::RecordArgABI RAA = getRecordArgABI(RT, getCXXABI());
    if (RAA == CGCXXABI::RAA_Indirect) {
      return getIndirectResult(Ty, false, State);
    } else if (State.IsDelegateCall) {
      // Avoid having different alignments on delegate call args by always
      // setting the alignment to 4, which is what we do for inallocas.
      ABIArgInfo Res = getIndirectResult(Ty, false, State);
      Res.setIndirectAlign(CharUnits::fromQuantity(4));
      return Res;
    } else if (RAA == CGCXXABI::RAA_DirectInMemory) {
      // The field index doesn't matter, we'll fix it up later.
      return ABIArgInfo::getInAlloca(/*FieldIndex=*/0);
    }
  }

  // Regcall uses the concept of a homogeneous vector aggregate, similar
  // to other targets.
  const Type *Base = nullptr;
  uint64_t NumElts = 0;
  if ((IsRegCall || IsVectorCall) &&
      isHomogeneousAggregate(Ty, Base, NumElts)) {
    if (State.FreeSSERegs >= NumElts) {
      State.FreeSSERegs -= NumElts;

      // Vectorcall passes HVAs directly and does not flatten them, but regcall
      // does.
      if (IsVectorCall)
        return getDirectX86Hva();

      if (Ty->isBuiltinType() || Ty->isVectorType())
        return ABIArgInfo::getDirect();
      return ABIArgInfo::getExpand();
    }
    return getIndirectResult(Ty, /*ByVal=*/false, State);
  }

  if (isAggregateTypeForABI(Ty)) {
    // Structures with flexible arrays are always indirect.
    // FIXME: This should not be byval!
    if (RT && RT->getDecl()->hasFlexibleArrayMember())
      return getIndirectResult(Ty, true, State);

    // Ignore empty structs/unions on non-Windows.
    if (!IsWin32StructABI && isEmptyRecord(getContext(), Ty, true))
      return ABIArgInfo::getIgnore();

    llvm::LLVMContext &LLVMContext = getVMContext();
    llvm::IntegerType *Int32 = llvm::Type::getInt32Ty(LLVMContext);
    bool NeedsPadding = false;
    bool InReg;
    if (shouldAggregateUseDirect(Ty, State, InReg, NeedsPadding)) {
      unsigned SizeInRegs = (TI.Width + 31) / 32;
      SmallVector<llvm::Type *, 3> Elements(SizeInRegs, Int32);
      llvm::Type *Result = llvm::StructType::get(LLVMContext, Elements);
      if (InReg)
        return ABIArgInfo::getDirectInReg(Result);
      else
        return ABIArgInfo::getDirect(Result);
    }
    llvm::IntegerType *PaddingType = NeedsPadding ? Int32 : nullptr;

    // Pass over-aligned aggregates to non-variadic functions on Windows
    // indirectly. This behavior was added in MSVC 2015. Use the required
    // alignment from the record layout, since that may be less than the
    // regular type alignment, and types with required alignment of less than 4
    // bytes are not passed indirectly.
    if (IsWin32StructABI && State.Required.isRequiredArg(ArgIndex)) {
      unsigned AlignInBits = 0;
      if (RT) {
        const ASTRecordLayout &Layout =
            getContext().getASTRecordLayout(RT->getDecl());
        AlignInBits = getContext().toBits(Layout.getRequiredAlignment());
      } else if (TI.isAlignRequired()) {
        AlignInBits = TI.Align;
      }
      if (AlignInBits > 32)
        return getIndirectResult(Ty, /*ByVal=*/false, State);
    }

    // Expand small (<= 128-bit) record types when we know that the stack
    // layout of those arguments will match the struct. This is important
    // because the LLVM backend isn't smart enough to remove byval, which
    // inhibits many optimizations.
    // Don't do this for the MCU if there are still free integer registers
    // (see X86_64 ABI for full explanation).
    if (TI.Width <= 4 * 32 && (!IsMCUABI || State.FreeRegs == 0) &&
        canExpandIndirectArgument(Ty))
      return ABIArgInfo::getExpandWithPadding(
          IsFastCall || IsVectorCall || IsRegCall, PaddingType);

    return getIndirectResult(Ty, true, State);
  }

  if (const VectorType *VT = Ty->getAs<VectorType>()) {
    // On Windows, vectors are passed directly if registers are available, or
    // indirectly if not. This avoids the need to align argument memory. Pass
    // user-defined vector types larger than 512 bits indirectly for
    // simplicity.
    if (IsWin32StructABI) {
      if (TI.Width <= 512 && State.FreeSSERegs > 0) {
        --State.FreeSSERegs;
        return ABIArgInfo::getDirectInReg();
      }
      return getIndirectResult(Ty, /*ByVal=*/false, State);
    }

    // On Darwin, some vectors are passed in memory; we handle this by passing
    // them as an i8/i16/i32/i64.
    if (IsDarwinVectorABI) {
      if ((TI.Width == 8 || TI.Width == 16 || TI.Width == 32) ||
          (TI.Width == 64 && VT->getNumElements() == 1))
        return ABIArgInfo::getDirect(
            llvm::IntegerType::get(getVMContext(), TI.Width));
    }

    if (IsX86_MMXType(CGT.ConvertType(Ty)))
      return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(), 64));

    return ABIArgInfo::getDirect();
  }

  if (const EnumType *EnumTy = Ty->getAs<EnumType>())
    Ty = EnumTy->getDecl()->getIntegerType();

  bool InReg = shouldPrimitiveUseInReg(Ty, State);

  if (isPromotableIntegerTypeForABI(Ty)) {
    if (InReg)
      return ABIArgInfo::getExtendInReg(Ty);
    return ABIArgInfo::getExtend(Ty);
  }

  if (const auto *EIT = Ty->getAs<BitIntType>()) {
    if (EIT->getNumBits() <= 64) {
      if (InReg)
        return ABIArgInfo::getDirectInReg();
      return ABIArgInfo::getDirect();
    }
    return getIndirectResult(Ty, /*ByVal=*/false, State);
  }

  if (InReg)
    return ABIArgInfo::getDirectInReg();
  return ABIArgInfo::getDirect();
}

void X86_32ABIInfo::computeInfo(CGFunctionInfo &FI) const {
  CCState State(FI);
  if (IsMCUABI)
    State.FreeRegs = 3;
  else if (State.CC == llvm::CallingConv::X86_FastCall) {
    State.FreeRegs = 2;
    State.FreeSSERegs = 3;
  } else if (State.CC == llvm::CallingConv::X86_VectorCall) {
    State.FreeRegs = 2;
    State.FreeSSERegs = 6;
  } else if (FI.getHasRegParm())
    State.FreeRegs = FI.getRegParm();
  else if (State.CC == llvm::CallingConv::X86_RegCall) {
    State.FreeRegs = 5;
    State.FreeSSERegs = 8;
  } else if (IsWin32StructABI) {
    // Since MSVC 2015, the first three SSE vectors have been passed in
    // registers. The rest are passed indirectly.
    State.FreeRegs = DefaultNumRegisterParameters;
    State.FreeSSERegs = 3;
  } else
    State.FreeRegs = DefaultNumRegisterParameters;

  if (!::classifyReturnType(getCXXABI(), FI, *this)) {
    FI.getReturnInfo() = classifyReturnType(FI.getReturnType(), State);
  } else if (FI.getReturnInfo().isIndirect()) {
    // The C++ ABI is not aware of register usage, so we have to check if the
    // return value was sret and put it in a register ourselves if appropriate.
    if (State.FreeRegs) {
      --State.FreeRegs; // The sret parameter consumes a register.
      if (!IsMCUABI)
        FI.getReturnInfo().setInReg(true);
    }
  }

  // The chain argument effectively gives us another free register.
  if (FI.isChainCall())
    ++State.FreeRegs;

  // For vectorcall, do a first pass over the arguments, assigning FP and
  // vector arguments to XMM registers as available.
  if (State.CC == llvm::CallingConv::X86_VectorCall)
    runVectorCallFirstPass(FI, State);

  bool UsedInAlloca = false;
  MutableArrayRef<CGFunctionInfoArgInfo> Args = FI.arguments();
  for (unsigned I = 0, E = Args.size(); I < E; ++I) {
    // Skip arguments that have already been assigned.
    if (State.IsPreassigned.test(I))
      continue;

    Args[I].info = classifyArgumentType(Args[I].type, State, I);
    UsedInAlloca |= (Args[I].info.getKind() == ABIArgInfo::InAlloca);
  }

  // If we needed to use inalloca for any argument, do a second pass and
  // rewrite all the memory arguments to use inalloca.
  if (UsedInAlloca)
    rewriteWithInAlloca(FI);
}

void
X86_32ABIInfo::addFieldToArgStruct(SmallVector<llvm::Type *, 6> &FrameFields,
                                   CharUnits &StackOffset, ABIArgInfo &Info,
                                   QualType Type) const {
  // Arguments are always 4-byte-aligned.
  CharUnits WordSize = CharUnits::fromQuantity(4);
  assert(StackOffset.isMultipleOf(WordSize) && "unaligned inalloca struct");

  // sret pointers and indirect things will require an extra pointer
  // indirection, unless they are byval. Most things are byval, and will not
  // require this indirection.
  bool IsIndirect = false;
  if (Info.isIndirect() && !Info.getIndirectByVal())
    IsIndirect = true;
  Info = ABIArgInfo::getInAlloca(FrameFields.size(), IsIndirect);
  llvm::Type *LLTy = CGT.ConvertTypeForMem(Type);
  if (IsIndirect)
    LLTy = llvm::PointerType::getUnqual(getVMContext());
  FrameFields.push_back(LLTy);
  StackOffset += IsIndirect ? WordSize : getContext().getTypeSizeInChars(Type);

  // Insert padding bytes to respect alignment.
  CharUnits FieldEnd = StackOffset;
  StackOffset = FieldEnd.alignTo(WordSize);
  if (StackOffset != FieldEnd) {
    CharUnits NumBytes = StackOffset - FieldEnd;
    llvm::Type *Ty = llvm::Type::getInt8Ty(getVMContext());
    Ty = llvm::ArrayType::get(Ty, NumBytes.getQuantity());
    FrameFields.push_back(Ty);
  }
}
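// For instance, appending a 6-byte struct at a word-aligned offset advances
// StackOffset by 6 and then pads it back up to the next multiple of 4 with a
// trailing [2 x i8] field, so the next argument again starts word-aligned.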

static bool isArgInAlloca(const ABIArgInfo &Info) {
  // Leave ignored and inreg arguments alone.
  switch (Info.getKind()) {
  case ABIArgInfo::InAlloca:
    return true;
  case ABIArgInfo::Ignore:
  case ABIArgInfo::IndirectAliased:
    return false;
  case ABIArgInfo::Indirect:
  case ABIArgInfo::Direct:
  case ABIArgInfo::Extend:
    return !Info.getInReg();
  case ABIArgInfo::Expand:
  case ABIArgInfo::CoerceAndExpand:
    // These are aggregate types which are never passed in registers when
    // inalloca is involved.
    return true;
  }
  llvm_unreachable("invalid enum");
}

void X86_32ABIInfo::rewriteWithInAlloca(CGFunctionInfo &FI) const {
  assert(IsWin32StructABI && "inalloca only supported on win32");

  // Build a packed struct type for all of the arguments in memory.
  SmallVector<llvm::Type *, 6> FrameFields;

  // The stack alignment is always 4.
  CharUnits StackAlign = CharUnits::fromQuantity(4);

  CharUnits StackOffset;
  CGFunctionInfo::arg_iterator I = FI.arg_begin(), E = FI.arg_end();

  // Put 'this' into the struct before 'sret', if necessary.
  bool IsThisCall =
      FI.getCallingConvention() == llvm::CallingConv::X86_ThisCall;
  ABIArgInfo &Ret = FI.getReturnInfo();
  if (Ret.isIndirect() && Ret.isSRetAfterThis() && !IsThisCall &&
      isArgInAlloca(I->info)) {
    addFieldToArgStruct(FrameFields, StackOffset, I->info, I->type);
    ++I;
  }

  // Put the sret parameter into the inalloca struct if it's in memory.
  if (Ret.isIndirect() && !Ret.getInReg()) {
    addFieldToArgStruct(FrameFields, StackOffset, Ret, FI.getReturnType());
    // On Windows, the hidden sret parameter is always returned in eax.
    Ret.setInAllocaSRet(IsWin32StructABI);
  }

  // Skip the 'this' parameter in ecx.
  if (IsThisCall)
    ++I;

  // Put arguments passed in memory into the struct.
  for (; I != E; ++I) {
    if (isArgInAlloca(I->info))
      addFieldToArgStruct(FrameFields, StackOffset, I->info, I->type);
  }

  FI.setArgStruct(llvm::StructType::get(getVMContext(), FrameFields,
                                        /*isPacked=*/true),
                  StackAlign);
}

Address X86_32ABIInfo::EmitVAArg(CodeGenFunction &CGF,
                                 Address VAListAddr, QualType Ty) const {

  auto TypeInfo = getContext().getTypeInfoInChars(Ty);

  // x86-32 changes the alignment of certain arguments on the stack.
  //
  // Just messing with TypeInfo like this works because we never pass
  // anything indirectly.
  TypeInfo.Align = CharUnits::fromQuantity(
      getTypeStackAlignInBytes(Ty, TypeInfo.Align.getQuantity()));

  return emitVoidPtrVAArg(CGF, VAListAddr, Ty, /*Indirect*/ false,
                          TypeInfo, CharUnits::fromQuantity(4),
                          /*AllowHigherAlign*/ true);
}

bool X86_32TargetCodeGenInfo::isStructReturnInRegABI(
    const llvm::Triple &Triple, const CodeGenOptions &Opts) {
  assert(Triple.getArch() == llvm::Triple::x86);

  switch (Opts.getStructReturnConvention()) {
  case CodeGenOptions::SRCK_Default:
    break;
  case CodeGenOptions::SRCK_OnStack: // -fpcc-struct-return
    return false;
  case CodeGenOptions::SRCK_InRegs: // -freg-struct-return
    return true;
  }

  if (Triple.isOSDarwin() || Triple.isOSIAMCU())
    return true;

  switch (Triple.getOS()) {
  case llvm::Triple::DragonFly:
  case llvm::Triple::FreeBSD:
  case llvm::Triple::OpenBSD:
  case llvm::Triple::Win32:
    return true;
  default:
    return false;
  }
}

static void addX86InterruptAttrs(const FunctionDecl *FD, llvm::GlobalValue *GV,
                                 CodeGen::CodeGenModule &CGM) {
  if (!FD->hasAttr<AnyX86InterruptAttr>())
    return;

  llvm::Function *Fn = cast<llvm::Function>(GV);
  Fn->setCallingConv(llvm::CallingConv::X86_INTR);
  if (FD->getNumParams() == 0)
    return;

  auto PtrTy = cast<PointerType>(FD->getParamDecl(0)->getType());
  llvm::Type *ByValTy = CGM.getTypes().ConvertType(PtrTy->getPointeeType());
  llvm::Attribute NewAttr = llvm::Attribute::getWithByValType(
      Fn->getContext(), ByValTy);
  Fn->addParamAttr(0, NewAttr);
}

void X86_32TargetCodeGenInfo::setTargetAttributes(
    const Decl *D, llvm::GlobalValue *GV, CodeGen::CodeGenModule &CGM) const {
  if (GV->isDeclaration())
    return;
  if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D)) {
    if (FD->hasAttr<X86ForceAlignArgPointerAttr>()) {
      llvm::Function *Fn = cast<llvm::Function>(GV);
      Fn->addFnAttr("stackrealign");
    }

    addX86InterruptAttrs(FD, GV, CGM);
  }
}

bool X86_32TargetCodeGenInfo::initDwarfEHRegSizeTable(
    CodeGen::CodeGenFunction &CGF,
    llvm::Value *Address) const {
  CodeGen::CGBuilderTy &Builder = CGF.Builder;

  llvm::Value *Four8 = llvm::ConstantInt::get(CGF.Int8Ty, 4);

  // 0-7 are the eight integer registers; the order is different
  // on Darwin (for EH), but the range is the same.
  // 8 is %eip.
  AssignToArrayRange(Builder, Address, Four8, 0, 8);

  if (CGF.CGM.getTarget().getTriple().isOSDarwin()) {
    // 12-16 are st(0..4). Not sure why we stop at 4.
    // These have size 16, which is sizeof(long double) on
    // platforms with 8-byte alignment for that type.
    llvm::Value *Sixteen8 = llvm::ConstantInt::get(CGF.Int8Ty, 16);
    AssignToArrayRange(Builder, Address, Sixteen8, 12, 16);

  } else {
    // 9 is %eflags, which doesn't get a size on Darwin for some
    // reason.
    Builder.CreateAlignedStore(
        Four8, Builder.CreateConstInBoundsGEP1_32(CGF.Int8Ty, Address, 9),
        CharUnits::One());

    // 11-16 are st(0..5). Not sure why we stop at 5.
    // These have size 12, which is sizeof(long double) on
    // platforms with 4-byte alignment for that type.
    llvm::Value *Twelve8 = llvm::ConstantInt::get(CGF.Int8Ty, 12);
    AssignToArrayRange(Builder, Address, Twelve8, 11, 16);
  }

  return false;
}

//===----------------------------------------------------------------------===//
// X86-64 ABI Implementation
//===----------------------------------------------------------------------===//

namespace {

/// Returns the size in bits of the largest (native) vector for \p AVXLevel.
static unsigned getNativeVectorSizeForAVXABI(X86AVXABILevel AVXLevel) {
  switch (AVXLevel) {
  case X86AVXABILevel::AVX512:
    return 512;
  case X86AVXABILevel::AVX:
    return 256;
  case X86AVXABILevel::None:
    return 128;
  }
  llvm_unreachable("Unknown AVXLevel");
}

/// X86_64ABIInfo - The X86_64 ABI information.
class X86_64ABIInfo : public ABIInfo {
  enum Class {
    Integer = 0,
    SSE,
    SSEUp,
    X87,
    X87Up,
    ComplexX87,
    NoClass,
    Memory
  };

  /// merge - Implement the X86_64 ABI merging algorithm.
  ///
  /// Merge an accumulating classification \arg Accum with a field
  /// classification \arg Field.
  ///
  /// \param Accum - The accumulating classification. This should
  /// always be either NoClass or the result of a previous merge
  /// call. In addition, this should never be Memory (the caller
  /// should just return Memory for the aggregate).
  static Class merge(Class Accum, Class Field);

  /// postMerge - Implement the X86_64 ABI post merging algorithm.
  ///
  /// Post merger cleanup, reduces a malformed Hi and Lo pair to
  /// final MEMORY or SSE classes when necessary.
  ///
  /// \param AggregateSize - The size of the current aggregate in
  /// the classification process.
  ///
  /// \param Lo - The classification for the parts of the type
  /// residing in the low word of the containing object.
  ///
  /// \param Hi - The classification for the parts of the type
  /// residing in the higher words of the containing object.
  ///
  void postMerge(unsigned AggregateSize, Class &Lo, Class &Hi) const;

  /// classify - Determine the x86_64 register classes in which the
  /// given type T should be passed.
  ///
  /// \param Lo - The classification for the parts of the type
  /// residing in the low word of the containing object.
  ///
  /// \param Hi - The classification for the parts of the type
  /// residing in the high word of the containing object.
  ///
  /// \param OffsetBase - The bit offset of this type in the
  /// containing object. Some parameters are classified differently
  /// depending on whether they straddle an eightbyte boundary.
  ///
  /// \param isNamedArg - Whether the argument in question is a "named"
  /// argument, as used in AMD64-ABI 3.5.7.
  ///
  /// \param IsRegCall - Whether the calling convention is regcall.
  ///
  /// If a word is unused its result will be NoClass; if a type should
  /// be passed in Memory then at least the classification of \arg Lo
  /// will be Memory.
  ///
  /// The \arg Lo class will be NoClass iff the argument is ignored.
  ///
  /// If the \arg Lo class is ComplexX87, then the \arg Hi class will
  /// also be ComplexX87.
  void classify(QualType T, uint64_t OffsetBase, Class &Lo, Class &Hi,
                bool isNamedArg, bool IsRegCall = false) const;
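  // As an illustration of the algorithm (see AMD64-ABI 3.2.3): for
  // 'struct { int a, b; double d; }', the first eightbyte (a, b) merges to
  // INTEGER and the second (d) to SSE, so Lo = Integer and Hi = SSE, and the
  // struct is passed in one GPR plus one XMM register.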
1266
1267 llvm::Type *GetByteVectorType(QualType Ty) const;
1268 llvm::Type *GetSSETypeAtOffset(llvm::Type *IRType,
1269 unsigned IROffset, QualType SourceTy,
1270 unsigned SourceOffset) const;
1271 llvm::Type *GetINTEGERTypeAtOffset(llvm::Type *IRType,
1272 unsigned IROffset, QualType SourceTy,
1273 unsigned SourceOffset) const;
1274
1275 /// getIndirectResult - Give a source type \arg Ty, return a suitable result
1276 /// such that the argument will be returned in memory.
1277 ABIArgInfo getIndirectReturnResult(QualType Ty) const;
1278
1279 /// getIndirectResult - Give a source type \arg Ty, return a suitable result
1280 /// such that the argument will be passed in memory.
1281 ///
1282 /// \param freeIntRegs - The number of free integer registers remaining
1283 /// available.
1284 ABIArgInfo getIndirectResult(QualType Ty, unsigned freeIntRegs) const;
1285
1286 ABIArgInfo classifyReturnType(QualType RetTy) const;
1287
1288 ABIArgInfo classifyArgumentType(QualType Ty, unsigned freeIntRegs,
1289 unsigned &neededInt, unsigned &neededSSE,
1290 bool isNamedArg,
1291 bool IsRegCall = false) const;
1292
1293 ABIArgInfo classifyRegCallStructType(QualType Ty, unsigned &NeededInt,
1294 unsigned &NeededSSE,
1295 unsigned &MaxVectorWidth) const;
1296
1297 ABIArgInfo classifyRegCallStructTypeImpl(QualType Ty, unsigned &NeededInt,
1298 unsigned &NeededSSE,
1299 unsigned &MaxVectorWidth) const;
1300
1301 bool IsIllegalVectorType(QualType Ty) const;
1302
1303 /// The 0.98 ABI revision clarified a lot of ambiguities,
1304 /// unfortunately in ways that were not always consistent with
1305 /// certain previous compilers. In particular, platforms which
1306 /// required strict binary compatibility with older versions of GCC
1307 /// may need to exempt themselves.
honorsRevision0_98() const1308 bool honorsRevision0_98() const {
1309 return !getTarget().getTriple().isOSDarwin();
1310 }
1311
1312 /// GCC classifies <1 x long long> as SSE but some platform ABIs choose to
1313 /// classify it as INTEGER (for compatibility with older clang compilers).
classifyIntegerMMXAsSSE() const1314 bool classifyIntegerMMXAsSSE() const {
1315 // Clang <= 3.8 did not do this.
1316 if (getContext().getLangOpts().getClangABICompat() <=
1317 LangOptions::ClangABI::Ver3_8)
1318 return false;
1319
1320 const llvm::Triple &Triple = getTarget().getTriple();
1321 if (Triple.isOSDarwin() || Triple.isPS() || Triple.isOSFreeBSD())
1322 return false;
1323 return true;
1324 }
1325
1326 // GCC classifies vectors of __int128 as memory.
passInt128VectorsInMem() const1327 bool passInt128VectorsInMem() const {
1328 // Clang <= 9.0 did not do this.
1329 if (getContext().getLangOpts().getClangABICompat() <=
1330 LangOptions::ClangABI::Ver9)
1331 return false;
1332
1333 const llvm::Triple &T = getTarget().getTriple();
1334 return T.isOSLinux() || T.isOSNetBSD();
1335 }
1336
1337 X86AVXABILevel AVXLevel;
1338 // Some ABIs (e.g. X32 ABI and Native Client OS) use 32 bit pointers on
1339 // 64-bit hardware.
1340 bool Has64BitPointers;
1341
1342 public:
X86_64ABIInfo(CodeGen::CodeGenTypes & CGT,X86AVXABILevel AVXLevel)1343 X86_64ABIInfo(CodeGen::CodeGenTypes &CGT, X86AVXABILevel AVXLevel)
1344 : ABIInfo(CGT), AVXLevel(AVXLevel),
1345 Has64BitPointers(CGT.getDataLayout().getPointerSize(0) == 8) {}
1346
isPassedUsingAVXType(QualType type) const1347 bool isPassedUsingAVXType(QualType type) const {
1348 unsigned neededInt, neededSSE;
1349 // The freeIntRegs argument doesn't matter here.
1350 ABIArgInfo info = classifyArgumentType(type, 0, neededInt, neededSSE,
1351 /*isNamedArg*/true);
1352 if (info.isDirect()) {
1353 llvm::Type *ty = info.getCoerceToType();
1354 if (llvm::VectorType *vectorTy = dyn_cast_or_null<llvm::VectorType>(ty))
1355 return vectorTy->getPrimitiveSizeInBits().getFixedValue() > 128;
1356 }
1357 return false;
1358 }
1359
1360 void computeInfo(CGFunctionInfo &FI) const override;
1361
1362 Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
1363 QualType Ty) const override;
1364 Address EmitMSVAArg(CodeGenFunction &CGF, Address VAListAddr,
1365 QualType Ty) const override;
1366
has64BitPointers() const1367 bool has64BitPointers() const {
1368 return Has64BitPointers;
1369 }
1370 };
1371
1372 /// WinX86_64ABIInfo - The Windows X86_64 ABI information.
1373 class WinX86_64ABIInfo : public ABIInfo {
1374 public:
WinX86_64ABIInfo(CodeGen::CodeGenTypes & CGT,X86AVXABILevel AVXLevel)1375 WinX86_64ABIInfo(CodeGen::CodeGenTypes &CGT, X86AVXABILevel AVXLevel)
1376 : ABIInfo(CGT), AVXLevel(AVXLevel),
1377 IsMingw64(getTarget().getTriple().isWindowsGNUEnvironment()) {}
1378
1379 void computeInfo(CGFunctionInfo &FI) const override;
1380
1381 Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
1382 QualType Ty) const override;
1383
isHomogeneousAggregateBaseType(QualType Ty) const1384 bool isHomogeneousAggregateBaseType(QualType Ty) const override {
1385 // FIXME: Assumes vectorcall is in use.
1386 return isX86VectorTypeForVectorCall(getContext(), Ty);
1387 }
1388
isHomogeneousAggregateSmallEnough(const Type * Ty,uint64_t NumMembers) const1389 bool isHomogeneousAggregateSmallEnough(const Type *Ty,
1390 uint64_t NumMembers) const override {
1391 // FIXME: Assumes vectorcall is in use.
1392 return isX86VectorCallAggregateSmallEnough(NumMembers);
1393 }
1394
1395 private:
1396 ABIArgInfo classify(QualType Ty, unsigned &FreeSSERegs, bool IsReturnType,
1397 bool IsVectorCall, bool IsRegCall) const;
1398 ABIArgInfo reclassifyHvaArgForVectorCall(QualType Ty, unsigned &FreeSSERegs,
1399 const ABIArgInfo ¤t) const;
1400
1401 X86AVXABILevel AVXLevel;
1402
1403 bool IsMingw64;
1404 };
1405
class X86_64TargetCodeGenInfo : public TargetCodeGenInfo {
public:
  X86_64TargetCodeGenInfo(CodeGen::CodeGenTypes &CGT, X86AVXABILevel AVXLevel)
      : TargetCodeGenInfo(std::make_unique<X86_64ABIInfo>(CGT, AVXLevel)) {
    SwiftInfo =
        std::make_unique<SwiftABIInfo>(CGT, /*SwiftErrorInRegister=*/true);
  }

  /// Disable tail call on x86-64. The epilogue code before the tail jump blocks
  /// autoreleaseRV/retainRV and autoreleaseRV/unsafeClaimRV optimizations.
  bool markARCOptimizedReturnCallsAsNoTail() const override { return true; }

  int getDwarfEHStackPointer(CodeGen::CodeGenModule &CGM) const override {
    return 7;
  }

  bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
                               llvm::Value *Address) const override {
    llvm::Value *Eight8 = llvm::ConstantInt::get(CGF.Int8Ty, 8);

    // 0-15 are the 16 integer registers.
    // 16 is %rip.
    AssignToArrayRange(CGF.Builder, Address, Eight8, 0, 16);
    return false;
  }

  llvm::Type *adjustInlineAsmType(CodeGen::CodeGenFunction &CGF,
                                  StringRef Constraint,
                                  llvm::Type *Ty) const override {
    return X86AdjustInlineAsmType(CGF, Constraint, Ty);
  }

  bool isNoProtoCallVariadic(const CallArgList &args,
                             const FunctionNoProtoType *fnType) const override {
    // The default CC on x86-64 sets %al to the number of SSE
    // registers used, and GCC sets this when calling an unprototyped
    // function, so we override the default behavior. However, don't do
    // that when AVX types are involved: the ABI explicitly states it is
    // undefined, and it doesn't work in practice because of how the ABI
    // defines varargs anyway.
    if (fnType->getCallConv() == CC_C) {
      bool HasAVXType = false;
      for (CallArgList::const_iterator
             it = args.begin(), ie = args.end(); it != ie; ++it) {
        if (getABIInfo<X86_64ABIInfo>().isPassedUsingAVXType(it->Ty)) {
          HasAVXType = true;
          break;
        }
      }

      if (!HasAVXType)
        return true;
    }

    return TargetCodeGenInfo::isNoProtoCallVariadic(args, fnType);
  }

  void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
                           CodeGen::CodeGenModule &CGM) const override {
    if (GV->isDeclaration())
      return;
    if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D)) {
      if (FD->hasAttr<X86ForceAlignArgPointerAttr>()) {
        llvm::Function *Fn = cast<llvm::Function>(GV);
        Fn->addFnAttr("stackrealign");
      }

      addX86InterruptAttrs(FD, GV, CGM);
    }
  }

  void checkFunctionCallABI(CodeGenModule &CGM, SourceLocation CallLoc,
                            const FunctionDecl *Caller,
                            const FunctionDecl *Callee,
                            const CallArgList &Args) const override;
};
} // namespace

static void initFeatureMaps(const ASTContext &Ctx,
                            llvm::StringMap<bool> &CallerMap,
                            const FunctionDecl *Caller,
                            llvm::StringMap<bool> &CalleeMap,
                            const FunctionDecl *Callee) {
  if (CalleeMap.empty() && CallerMap.empty()) {
    // The caller is potentially nullptr in the case where the call isn't in a
    // function. In this case, getFunctionFeatureMap ensures we just get the
    // TU-level setting (since it cannot be modified by 'target').
    Ctx.getFunctionFeatureMap(CallerMap, Caller);
    Ctx.getFunctionFeatureMap(CalleeMap, Callee);
  }
}

static bool checkAVXParamFeature(DiagnosticsEngine &Diag,
                                 SourceLocation CallLoc,
                                 const llvm::StringMap<bool> &CallerMap,
                                 const llvm::StringMap<bool> &CalleeMap,
                                 QualType Ty, StringRef Feature,
                                 bool IsArgument) {
  bool CallerHasFeat = CallerMap.lookup(Feature);
  bool CalleeHasFeat = CalleeMap.lookup(Feature);
  if (!CallerHasFeat && !CalleeHasFeat)
    return Diag.Report(CallLoc, diag::warn_avx_calling_convention)
           << IsArgument << Ty << Feature;

  // Mixing calling conventions here is very clearly an error.
  if (!CallerHasFeat || !CalleeHasFeat)
    return Diag.Report(CallLoc, diag::err_avx_calling_convention)
           << IsArgument << Ty << Feature;

  // Else, both caller and callee have the required feature, so there is no
  // need to diagnose.
  return false;
}

static bool checkAVX512ParamFeature(DiagnosticsEngine &Diag,
                                    SourceLocation CallLoc,
                                    const llvm::StringMap<bool> &CallerMap,
                                    const llvm::StringMap<bool> &CalleeMap,
                                    QualType Ty, bool IsArgument) {
  bool Caller256 = CallerMap.lookup("avx512f") && !CallerMap.lookup("evex512");
  bool Callee256 = CalleeMap.lookup("avx512f") && !CalleeMap.lookup("evex512");

  // Forbid passing or returning 512-bit or larger vectors when ZMM
  // instructions are disabled.
  if (Caller256 || Callee256)
    return Diag.Report(CallLoc, diag::err_avx_calling_convention)
           << IsArgument << Ty << "evex512";

  return checkAVXParamFeature(Diag, CallLoc, CallerMap, CalleeMap, Ty,
                              "avx512f", IsArgument);
}

static bool checkAVXParam(DiagnosticsEngine &Diag, ASTContext &Ctx,
                          SourceLocation CallLoc,
                          const llvm::StringMap<bool> &CallerMap,
                          const llvm::StringMap<bool> &CalleeMap, QualType Ty,
                          bool IsArgument) {
  uint64_t Size = Ctx.getTypeSize(Ty);
  if (Size > 256)
    return checkAVX512ParamFeature(Diag, CallLoc, CallerMap, CalleeMap, Ty,
                                   IsArgument);

  if (Size > 128)
    return checkAVXParamFeature(Diag, CallLoc, CallerMap, CalleeMap, Ty, "avx",
                                IsArgument);

  return false;
}

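// Illustrative sketch (not from the LLVM sources): with these thresholds, a
// 256-bit argument such as
//
//   __m256 callee(__m256 v);   // hypothetical callee
//
// is checked against the "avx" feature of both caller and callee, while a
// 512-bit __m512 argument is routed through the "avx512f"/"evex512" checks
// above instead. Arguments of 128 bits or less are never diagnosed here.
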
void X86_64TargetCodeGenInfo::checkFunctionCallABI(
    CodeGenModule &CGM, SourceLocation CallLoc, const FunctionDecl *Caller,
    const FunctionDecl *Callee, const CallArgList &Args) const {
  llvm::StringMap<bool> CallerMap;
  llvm::StringMap<bool> CalleeMap;
  unsigned ArgIndex = 0;

  // We need to loop through the actual call arguments rather than the
  // function's parameters, in case this is variadic.
  for (const CallArg &Arg : Args) {
    // The "avx" feature changes how vectors >128 in size are passed. "avx512f"
    // additionally changes how vectors >256 in size are passed. Like GCC, we
    // warn when a function is called with an argument where this will change.
    // Unlike GCC, we also error when it is an obvious ABI mismatch, that is,
    // the caller and callee features are mismatched.
    // Unfortunately, we cannot do this diagnostic in Sema, since the callee can
    // change its ABI with attribute-target after this call.
    if (Arg.getType()->isVectorType() &&
        CGM.getContext().getTypeSize(Arg.getType()) > 128) {
      initFeatureMaps(CGM.getContext(), CallerMap, Caller, CalleeMap, Callee);
      QualType Ty = Arg.getType();
      // The CallArg seems to have desugared the type already, so for clearer
      // diagnostics, replace it with the type in the FunctionDecl if possible.
      if (ArgIndex < Callee->getNumParams())
        Ty = Callee->getParamDecl(ArgIndex)->getType();

      if (checkAVXParam(CGM.getDiags(), CGM.getContext(), CallLoc, CallerMap,
                        CalleeMap, Ty, /*IsArgument*/ true))
        return;
    }
    ++ArgIndex;
  }

  // Check return always, as we don't have a good way of knowing in codegen
  // whether this value is used, tail-called, etc.
  if (Callee->getReturnType()->isVectorType() &&
      CGM.getContext().getTypeSize(Callee->getReturnType()) > 128) {
    initFeatureMaps(CGM.getContext(), CallerMap, Caller, CalleeMap, Callee);
    checkAVXParam(CGM.getDiags(), CGM.getContext(), CallLoc, CallerMap,
                  CalleeMap, Callee->getReturnType(),
                  /*IsArgument*/ false);
  }
}

std::string TargetCodeGenInfo::qualifyWindowsLibrary(StringRef Lib) {
  // If the argument does not end in .lib, automatically add the suffix.
  // If the argument contains a space, enclose it in quotes.
  // This matches the behavior of MSVC.
  bool Quote = Lib.contains(' ');
  std::string ArgStr = Quote ? "\"" : "";
  ArgStr += Lib;
  if (!Lib.ends_with_insensitive(".lib") && !Lib.ends_with_insensitive(".a"))
    ArgStr += ".lib";
  ArgStr += Quote ? "\"" : "";
  return ArgStr;
}

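// A few illustrative inputs and outputs of the rules above (sketch only):
//   kernel32   -> kernel32.lib     (suffix appended)
//   my lib     -> "my lib.lib"     (quoted because it contains a space)
//   libfoo.a   -> libfoo.a         (already ends in a library suffix)
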
namespace {
class WinX86_32TargetCodeGenInfo : public X86_32TargetCodeGenInfo {
public:
  WinX86_32TargetCodeGenInfo(CodeGen::CodeGenTypes &CGT,
                             bool DarwinVectorABI, bool RetSmallStructInRegABI,
                             bool Win32StructABI,
                             unsigned NumRegisterParameters)
      : X86_32TargetCodeGenInfo(CGT, DarwinVectorABI, RetSmallStructInRegABI,
                                Win32StructABI, NumRegisterParameters, false) {}

  void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
                           CodeGen::CodeGenModule &CGM) const override;

  void getDependentLibraryOption(llvm::StringRef Lib,
                                 llvm::SmallString<24> &Opt) const override {
    Opt = "/DEFAULTLIB:";
    Opt += qualifyWindowsLibrary(Lib);
  }

  void getDetectMismatchOption(llvm::StringRef Name,
                               llvm::StringRef Value,
                               llvm::SmallString<32> &Opt) const override {
    Opt = "/FAILIFMISMATCH:\"" + Name.str() + "=" + Value.str() + "\"";
  }
};
} // namespace

void WinX86_32TargetCodeGenInfo::setTargetAttributes(
    const Decl *D, llvm::GlobalValue *GV, CodeGen::CodeGenModule &CGM) const {
  X86_32TargetCodeGenInfo::setTargetAttributes(D, GV, CGM);
  if (GV->isDeclaration())
    return;
  addStackProbeTargetAttributes(D, GV, CGM);
}

namespace {
class WinX86_64TargetCodeGenInfo : public TargetCodeGenInfo {
public:
  WinX86_64TargetCodeGenInfo(CodeGen::CodeGenTypes &CGT,
                             X86AVXABILevel AVXLevel)
      : TargetCodeGenInfo(std::make_unique<WinX86_64ABIInfo>(CGT, AVXLevel)) {
    SwiftInfo =
        std::make_unique<SwiftABIInfo>(CGT, /*SwiftErrorInRegister=*/true);
  }

  void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
                           CodeGen::CodeGenModule &CGM) const override;

  int getDwarfEHStackPointer(CodeGen::CodeGenModule &CGM) const override {
    return 7;
  }

  bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
                               llvm::Value *Address) const override {
    llvm::Value *Eight8 = llvm::ConstantInt::get(CGF.Int8Ty, 8);

    // 0-15 are the 16 integer registers.
    // 16 is %rip.
    AssignToArrayRange(CGF.Builder, Address, Eight8, 0, 16);
    return false;
  }

  void getDependentLibraryOption(llvm::StringRef Lib,
                                 llvm::SmallString<24> &Opt) const override {
    Opt = "/DEFAULTLIB:";
    Opt += qualifyWindowsLibrary(Lib);
  }

  void getDetectMismatchOption(llvm::StringRef Name,
                               llvm::StringRef Value,
                               llvm::SmallString<32> &Opt) const override {
    Opt = "/FAILIFMISMATCH:\"" + Name.str() + "=" + Value.str() + "\"";
  }
};
} // namespace

void WinX86_64TargetCodeGenInfo::setTargetAttributes(
    const Decl *D, llvm::GlobalValue *GV, CodeGen::CodeGenModule &CGM) const {
  TargetCodeGenInfo::setTargetAttributes(D, GV, CGM);
  if (GV->isDeclaration())
    return;
  if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D)) {
    if (FD->hasAttr<X86ForceAlignArgPointerAttr>()) {
      llvm::Function *Fn = cast<llvm::Function>(GV);
      Fn->addFnAttr("stackrealign");
    }

    addX86InterruptAttrs(FD, GV, CGM);
  }

  addStackProbeTargetAttributes(D, GV, CGM);
}

void X86_64ABIInfo::postMerge(unsigned AggregateSize, Class &Lo,
                              Class &Hi) const {
  // AMD64-ABI 3.2.3p2: Rule 5. Then a post merger cleanup is done:
  //
  // (a) If one of the classes is Memory, the whole argument is passed in
  //     memory.
  //
  // (b) If X87UP is not preceded by X87, the whole argument is passed in
  //     memory.
  //
  // (c) If the size of the aggregate exceeds two eightbytes and the first
  //     eightbyte isn't SSE or any other eightbyte isn't SSEUP, the whole
  //     argument is passed in memory. NOTE: This is necessary to keep the
  //     ABI working for processors that don't support the __m256 type.
  //
  // (d) If SSEUP is not preceded by SSE or SSEUP, it is converted to SSE.
  //
  // Some of these are enforced by the merging logic. Others can arise
  // only with unions; for example:
  //   union { _Complex double; unsigned; }
  //
  // Note that clauses (b) and (c) were added in 0.98.
  //
  if (Hi == Memory)
    Lo = Memory;
  if (Hi == X87Up && Lo != X87 && honorsRevision0_98())
    Lo = Memory;
  if (AggregateSize > 128 && (Lo != SSE || Hi != SSEUp))
    Lo = Memory;
  if (Hi == SSEUp && Lo != SSE)
    Hi = SSE;
}

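// Illustrative sketch (not from the LLVM sources) of rule (c): under the ABI,
// a struct of two 128-bit vectors,
//
//   struct TwoXmm { __m128 a, b; };   // eightbytes: SSE, SSEUP, SSE, SSEUP
//
// has a third eightbyte of class SSE rather than SSEUP, so the whole struct
// is passed in memory, whereas a lone __m256 (SSE, SSEUP, SSEUP, SSEUP) can
// stay in a single YMM register.
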
X86_64ABIInfo::Class X86_64ABIInfo::merge(Class Accum, Class Field) {
  // AMD64-ABI 3.2.3p2: Rule 4. Each field of an object is
  // classified recursively so that always two fields are
  // considered. The resulting class is calculated according to
  // the classes of the fields in the eightbyte:
  //
  // (a) If both classes are equal, this is the resulting class.
  //
  // (b) If one of the classes is NO_CLASS, the resulting class is
  //     the other class.
  //
  // (c) If one of the classes is MEMORY, the result is the MEMORY
  //     class.
  //
  // (d) If one of the classes is INTEGER, the result is the
  //     INTEGER.
  //
  // (e) If one of the classes is X87, X87UP, COMPLEX_X87 class,
  //     MEMORY is used as class.
  //
  // (f) Otherwise class SSE is used.

  // Accum should never be memory (we should have returned) or
  // ComplexX87 (because this cannot be passed in a structure).
  assert((Accum != Memory && Accum != ComplexX87) &&
         "Invalid accumulated classification during merge.");
  if (Accum == Field || Field == NoClass)
    return Accum;
  if (Field == Memory)
    return Memory;
  if (Accum == NoClass)
    return Field;
  if (Accum == Integer || Field == Integer)
    return Integer;
  if (Field == X87 || Field == X87Up || Field == ComplexX87 ||
      Accum == X87 || Accum == X87Up)
    return Memory;
  return SSE;
}

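// Illustrative sketch (not from the LLVM sources): for a struct whose fields
// share one eightbyte,
//
//   struct S { int i; float f; };   // 8 bytes total
//
// the fields classify as INTEGER and SSE; merging them yields INTEGER by
// rule (d), so S is passed in a single general-purpose register.
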
void X86_64ABIInfo::classify(QualType Ty, uint64_t OffsetBase, Class &Lo,
                             Class &Hi, bool isNamedArg, bool IsRegCall) const {
  // FIXME: This code can be simplified by introducing a simple value class for
  // Class pairs with appropriate constructor methods for the various
  // situations.

  // FIXME: Some of the split computations are wrong; unaligned vectors
  // shouldn't be passed in registers for example, so there is no chance they
  // can straddle an eightbyte. Verify & simplify.

  Lo = Hi = NoClass;

  Class &Current = OffsetBase < 64 ? Lo : Hi;
  Current = Memory;

  if (const BuiltinType *BT = Ty->getAs<BuiltinType>()) {
    BuiltinType::Kind k = BT->getKind();

    if (k == BuiltinType::Void) {
      Current = NoClass;
    } else if (k == BuiltinType::Int128 || k == BuiltinType::UInt128) {
      Lo = Integer;
      Hi = Integer;
    } else if (k >= BuiltinType::Bool && k <= BuiltinType::LongLong) {
      Current = Integer;
    } else if (k == BuiltinType::Float || k == BuiltinType::Double ||
               k == BuiltinType::Float16 || k == BuiltinType::BFloat16) {
      Current = SSE;
    } else if (k == BuiltinType::Float128) {
      Lo = SSE;
      Hi = SSEUp;
    } else if (k == BuiltinType::LongDouble) {
      const llvm::fltSemantics *LDF = &getTarget().getLongDoubleFormat();
      if (LDF == &llvm::APFloat::IEEEquad()) {
        Lo = SSE;
        Hi = SSEUp;
      } else if (LDF == &llvm::APFloat::x87DoubleExtended()) {
        Lo = X87;
        Hi = X87Up;
      } else if (LDF == &llvm::APFloat::IEEEdouble()) {
        Current = SSE;
      } else
        llvm_unreachable("unexpected long double representation!");
    }
    // FIXME: _Decimal32 and _Decimal64 are SSE.
    // FIXME: __float128 and _Decimal128 are (SSE, SSEUp).
    return;
  }

  if (const EnumType *ET = Ty->getAs<EnumType>()) {
    // Classify the underlying integer type.
    classify(ET->getDecl()->getIntegerType(), OffsetBase, Lo, Hi, isNamedArg);
    return;
  }

  if (Ty->hasPointerRepresentation()) {
    Current = Integer;
    return;
  }

  if (Ty->isMemberPointerType()) {
    if (Ty->isMemberFunctionPointerType()) {
      if (Has64BitPointers) {
        // If Has64BitPointers, this is an {i64, i64}, so classify both
        // Lo and Hi now.
        Lo = Hi = Integer;
      } else {
        // Otherwise, with 32-bit pointers, this is an {i32, i32}. If that
        // straddles an eightbyte boundary, Hi should be classified as well.
        uint64_t EB_FuncPtr = (OffsetBase) / 64;
        uint64_t EB_ThisAdj = (OffsetBase + 64 - 1) / 64;
        if (EB_FuncPtr != EB_ThisAdj) {
          Lo = Hi = Integer;
        } else {
          Current = Integer;
        }
      }
    } else {
      Current = Integer;
    }
    return;
  }

  if (const VectorType *VT = Ty->getAs<VectorType>()) {
    uint64_t Size = getContext().getTypeSize(VT);
    if (Size == 1 || Size == 8 || Size == 16 || Size == 32) {
      // gcc passes the following as integer:
      // 4 bytes - <4 x char>, <2 x short>, <1 x int>, <1 x float>
      // 2 bytes - <2 x char>, <1 x short>
      // 1 byte  - <1 x char>
      Current = Integer;

      // If this type crosses an eightbyte boundary, it should be
      // split.
      uint64_t EB_Lo = (OffsetBase) / 64;
      uint64_t EB_Hi = (OffsetBase + Size - 1) / 64;
      if (EB_Lo != EB_Hi)
        Hi = Lo;
    } else if (Size == 64) {
      QualType ElementType = VT->getElementType();

      // gcc passes <1 x double> in memory. :(
      if (ElementType->isSpecificBuiltinType(BuiltinType::Double))
        return;

      // gcc passes <1 x long long> as SSE but clang used to unconditionally
      // pass them as integer. For platforms where clang is the de facto
      // platform compiler, we must continue to use integer.
      if (!classifyIntegerMMXAsSSE() &&
          (ElementType->isSpecificBuiltinType(BuiltinType::LongLong) ||
           ElementType->isSpecificBuiltinType(BuiltinType::ULongLong) ||
           ElementType->isSpecificBuiltinType(BuiltinType::Long) ||
           ElementType->isSpecificBuiltinType(BuiltinType::ULong)))
        Current = Integer;
      else
        Current = SSE;

      // If this type crosses an eightbyte boundary, it should be
      // split.
      if (OffsetBase && OffsetBase != 64)
        Hi = Lo;
    } else if (Size == 128 ||
               (isNamedArg && Size <= getNativeVectorSizeForAVXABI(AVXLevel))) {
      QualType ElementType = VT->getElementType();

      // gcc passes 256 and 512 bit <X x __int128> vectors in memory. :(
      if (passInt128VectorsInMem() && Size != 128 &&
          (ElementType->isSpecificBuiltinType(BuiltinType::Int128) ||
           ElementType->isSpecificBuiltinType(BuiltinType::UInt128)))
        return;

      // Arguments of 256-bits are split into four eightbyte chunks. The
      // least significant one belongs to class SSE and all the others to class
      // SSEUP. The original Lo and Hi design considers that types can't be
      // greater than 128-bits, so a 64-bit split in Hi and Lo makes sense.
      // This design isn't correct for 256-bits, but since there are no cases
      // where the upper parts would need to be inspected, avoid adding
      // complexity and just consider Hi to match the 64-256 part.
      //
      // Note that per 3.5.7 of AMD64-ABI, 256-bit args are only passed in
      // registers if they are "named", i.e. not part of the "..." of a
      // variadic function.
      //
      // Similarly, per 3.2.3. of the AVX512 draft, 512-bit ("named") args are
      // split into eight eightbyte chunks, one SSE and seven SSEUP.
      Lo = SSE;
      Hi = SSEUp;
    }
    return;
  }

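  // Illustrative sketch (not from the LLVM sources): a named __m256 argument
  // classifies as (SSE, SSEUp) above and travels in a single YMM register
  // when the AVX level permits 256-bit native vectors. The same __m256 passed
  // through the "..." of a variadic call matches none of the branches, so the
  // initial Memory classification stands and it goes on the stack.
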
  if (const ComplexType *CT = Ty->getAs<ComplexType>()) {
    QualType ET = getContext().getCanonicalType(CT->getElementType());

    uint64_t Size = getContext().getTypeSize(Ty);
    if (ET->isIntegralOrEnumerationType()) {
      if (Size <= 64)
        Current = Integer;
      else if (Size <= 128)
        Lo = Hi = Integer;
    } else if (ET->isFloat16Type() || ET == getContext().FloatTy ||
               ET->isBFloat16Type()) {
      Current = SSE;
    } else if (ET == getContext().DoubleTy) {
      Lo = Hi = SSE;
    } else if (ET == getContext().LongDoubleTy) {
      const llvm::fltSemantics *LDF = &getTarget().getLongDoubleFormat();
      if (LDF == &llvm::APFloat::IEEEquad())
        Current = Memory;
      else if (LDF == &llvm::APFloat::x87DoubleExtended())
        Current = ComplexX87;
      else if (LDF == &llvm::APFloat::IEEEdouble())
        Lo = Hi = SSE;
      else
        llvm_unreachable("unexpected long double representation!");
    }

    // If this complex type crosses an eightbyte boundary then it
    // should be split.
    uint64_t EB_Real = (OffsetBase) / 64;
    uint64_t EB_Imag = (OffsetBase + getContext().getTypeSize(ET)) / 64;
    if (Hi == NoClass && EB_Real != EB_Imag)
      Hi = Lo;

    return;
  }

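  // Illustrative sketch (not from the LLVM sources): _Complex float fits in
  // one eightbyte, so it classifies as a single SSE eightbyte (one XMM
  // register), while _Complex double spans two eightbytes and classifies as
  // (SSE, SSE): real and imaginary parts travel in two XMM registers.
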
  if (const auto *EITy = Ty->getAs<BitIntType>()) {
    if (EITy->getNumBits() <= 64)
      Current = Integer;
    else if (EITy->getNumBits() <= 128)
      Lo = Hi = Integer;
    // Larger values need to get passed in memory.
    return;
  }

  if (const ConstantArrayType *AT = getContext().getAsConstantArrayType(Ty)) {
    // Arrays are treated like structures.

    uint64_t Size = getContext().getTypeSize(Ty);

    // AMD64-ABI 3.2.3p2: Rule 1. If the size of an object is larger
    // than eight eightbytes, ..., it has class MEMORY.
    // The regcall ABI doesn't place a size limit on objects; the only limit
    // is the number of free registers, which is checked in computeInfo.
    if (!IsRegCall && Size > 512)
      return;

    // AMD64-ABI 3.2.3p2: Rule 1. If ..., or it contains unaligned
    // fields, it has class MEMORY.
    //
    // Only need to check alignment of array base.
    if (OffsetBase % getContext().getTypeAlign(AT->getElementType()))
      return;

    // Otherwise implement simplified merge. We could be smarter about
    // this, but it isn't worth it and would be harder to verify.
    Current = NoClass;
    uint64_t EltSize = getContext().getTypeSize(AT->getElementType());
    uint64_t ArraySize = AT->getSize().getZExtValue();

    // The only case a 256-bit wide vector could be used is when the array
    // contains a single 256-bit element. Since Lo and Hi logic isn't extended
    // to work for sizes wider than 128, early check and fall back to memory.
    //
    if (Size > 128 &&
        (Size != EltSize || Size > getNativeVectorSizeForAVXABI(AVXLevel)))
      return;

    for (uint64_t i = 0, Offset = OffsetBase; i < ArraySize;
         ++i, Offset += EltSize) {
      Class FieldLo, FieldHi;
      classify(AT->getElementType(), Offset, FieldLo, FieldHi, isNamedArg);
      Lo = merge(Lo, FieldLo);
      Hi = merge(Hi, FieldHi);
      if (Lo == Memory || Hi == Memory)
        break;
    }

    postMerge(Size, Lo, Hi);
    assert((Hi != SSEUp || Lo == SSE) && "Invalid SSEUp array classification.");
    return;
  }

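  // Illustrative sketch (not from the LLVM sources): an array field such as
  //
  //   struct S { double d[2]; };   // elements at offsets 0 and 64
  //
  // classifies each element as SSE, giving Lo = SSE and Hi = SSE, so S is
  // passed in two XMM registers, just like struct { double a, b; }.
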
  if (const RecordType *RT = Ty->getAs<RecordType>()) {
    uint64_t Size = getContext().getTypeSize(Ty);

    // AMD64-ABI 3.2.3p2: Rule 1. If the size of an object is larger
    // than eight eightbytes, ..., it has class MEMORY.
    if (Size > 512)
      return;

    // AMD64-ABI 3.2.3p2: Rule 2. If a C++ object has either a non-trivial
    // copy constructor or a non-trivial destructor, it is passed by invisible
    // reference.
    if (getRecordArgABI(RT, getCXXABI()))
      return;

    const RecordDecl *RD = RT->getDecl();

    // Assume variable sized types are passed in memory.
    if (RD->hasFlexibleArrayMember())
      return;

    const ASTRecordLayout &Layout = getContext().getASTRecordLayout(RD);

    // Reset Lo class, this will be recomputed.
    Current = NoClass;

    // If this is a C++ record, classify the bases first.
    if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD)) {
      for (const auto &I : CXXRD->bases()) {
        assert(!I.isVirtual() && !I.getType()->isDependentType() &&
               "Unexpected base class!");
        const auto *Base =
            cast<CXXRecordDecl>(I.getType()->castAs<RecordType>()->getDecl());

        // Classify this field.
        //
        // AMD64-ABI 3.2.3p2: Rule 3. If the size of the aggregate exceeds a
        // single eightbyte, each is classified separately. Each eightbyte gets
        // initialized to class NO_CLASS.
        Class FieldLo, FieldHi;
        uint64_t Offset =
            OffsetBase + getContext().toBits(Layout.getBaseClassOffset(Base));
        classify(I.getType(), Offset, FieldLo, FieldHi, isNamedArg);
        Lo = merge(Lo, FieldLo);
        Hi = merge(Hi, FieldHi);
        if (Lo == Memory || Hi == Memory) {
          postMerge(Size, Lo, Hi);
          return;
        }
      }
    }

    // Classify the fields one at a time, merging the results.
    unsigned idx = 0;
    bool UseClang11Compat = getContext().getLangOpts().getClangABICompat() <=
                                LangOptions::ClangABI::Ver11 ||
                            getContext().getTargetInfo().getTriple().isPS();
    bool IsUnion = RT->isUnionType() && !UseClang11Compat;

    for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
         i != e; ++i, ++idx) {
      uint64_t Offset = OffsetBase + Layout.getFieldOffset(idx);
      bool BitField = i->isBitField();

      // Ignore padding bit-fields.
      if (BitField && i->isUnnamedBitfield())
        continue;

      // AMD64-ABI 3.2.3p2: Rule 1. If the size of an object is larger than
      // eight eightbytes, or it contains unaligned fields, it has class MEMORY.
      //
      // The only case a 256-bit or a 512-bit wide vector could be used is when
      // the struct contains a single 256-bit or 512-bit element. Early check
      // and fall back to memory.
      //
      // FIXME: Extend the Lo and Hi logic properly to work for sizes wider
      // than 128.
      if (Size > 128 &&
          ((!IsUnion && Size != getContext().getTypeSize(i->getType())) ||
           Size > getNativeVectorSizeForAVXABI(AVXLevel))) {
        Lo = Memory;
        postMerge(Size, Lo, Hi);
        return;
      }
      // Note, skip this test for bit-fields, see below.
      if (!BitField && Offset % getContext().getTypeAlign(i->getType())) {
        Lo = Memory;
        postMerge(Size, Lo, Hi);
        return;
      }

      // Classify this field.
      //
      // AMD64-ABI 3.2.3p2: Rule 3. If the size of the aggregate
      // exceeds a single eightbyte, each is classified
      // separately. Each eightbyte gets initialized to class
      // NO_CLASS.
      Class FieldLo, FieldHi;

      // Bit-fields require special handling, they do not force the
      // structure to be passed in memory even if unaligned, and
      // therefore they can straddle an eightbyte.
      if (BitField) {
        assert(!i->isUnnamedBitfield());
        uint64_t Offset = OffsetBase + Layout.getFieldOffset(idx);
        uint64_t Size = i->getBitWidthValue(getContext());

        uint64_t EB_Lo = Offset / 64;
        uint64_t EB_Hi = (Offset + Size - 1) / 64;

        if (EB_Lo) {
          assert(EB_Hi == EB_Lo && "Invalid classification, type > 16 bytes.");
          FieldLo = NoClass;
          FieldHi = Integer;
        } else {
          FieldLo = Integer;
          FieldHi = EB_Hi ? Integer : NoClass;
        }
      } else
        classify(i->getType(), Offset, FieldLo, FieldHi, isNamedArg);
      Lo = merge(Lo, FieldLo);
      Hi = merge(Hi, FieldHi);
      if (Lo == Memory || Hi == Memory)
        break;
    }

    postMerge(Size, Lo, Hi);
  }
}

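// Illustrative sketch (not from the LLVM sources) of a full record
// classification:
//
//   struct S { double d; int i; };   // d in eightbyte 0, i in eightbyte 1
//
// yields Lo = SSE and Hi = Integer, so S is passed in one XMM register and
// one general-purpose register.
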
ABIArgInfo X86_64ABIInfo::getIndirectReturnResult(QualType Ty) const {
  // If this is a scalar LLVM value then assume LLVM will pass it in the right
  // place naturally.
  if (!isAggregateTypeForABI(Ty)) {
    // Treat an enum type as its underlying type.
    if (const EnumType *EnumTy = Ty->getAs<EnumType>())
      Ty = EnumTy->getDecl()->getIntegerType();

    if (Ty->isBitIntType())
      return getNaturalAlignIndirect(Ty);

    return (isPromotableIntegerTypeForABI(Ty) ? ABIArgInfo::getExtend(Ty)
                                              : ABIArgInfo::getDirect());
  }

  return getNaturalAlignIndirect(Ty);
}

bool X86_64ABIInfo::IsIllegalVectorType(QualType Ty) const {
  if (const VectorType *VecTy = Ty->getAs<VectorType>()) {
    uint64_t Size = getContext().getTypeSize(VecTy);
    unsigned LargestVector = getNativeVectorSizeForAVXABI(AVXLevel);
    if (Size <= 64 || Size > LargestVector)
      return true;
    QualType EltTy = VecTy->getElementType();
    if (passInt128VectorsInMem() &&
        (EltTy->isSpecificBuiltinType(BuiltinType::Int128) ||
         EltTy->isSpecificBuiltinType(BuiltinType::UInt128)))
      return true;
  }

  return false;
}

ABIArgInfo X86_64ABIInfo::getIndirectResult(QualType Ty,
                                            unsigned freeIntRegs) const {
  // If this is a scalar LLVM value then assume LLVM will pass it in the right
  // place naturally.
  //
  // This assumption is optimistic, as there could be free registers available
  // when we need to pass this argument in memory, and LLVM could try to pass
  // the argument in the free register. This does not seem to happen currently,
  // but this code would be much safer if we could mark the argument with
  // 'onstack'. See PR12193.
  if (!isAggregateTypeForABI(Ty) && !IsIllegalVectorType(Ty) &&
      !Ty->isBitIntType()) {
    // Treat an enum type as its underlying type.
    if (const EnumType *EnumTy = Ty->getAs<EnumType>())
      Ty = EnumTy->getDecl()->getIntegerType();

    return (isPromotableIntegerTypeForABI(Ty) ? ABIArgInfo::getExtend(Ty)
                                              : ABIArgInfo::getDirect());
  }

  if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, getCXXABI()))
    return getNaturalAlignIndirect(Ty, RAA == CGCXXABI::RAA_DirectInMemory);

  // Compute the byval alignment. We specify the alignment of the byval in all
  // cases so that the mid-level optimizer knows the alignment of the byval.
  unsigned Align = std::max(getContext().getTypeAlign(Ty) / 8, 8U);

  // Attempt to avoid passing indirect results using byval when possible. This
  // is important for good codegen.
  //
  // We do this by coercing the value into a scalar type which the backend can
  // handle naturally (i.e., without using byval).
  //
  // For simplicity, we currently only do this when we have exhausted all of the
  // free integer registers. Doing this when there are free integer registers
  // would require more care, as we would have to ensure that the coerced value
  // did not claim the unused register. That would require either reordering the
  // arguments to the function (so that any subsequent inreg values came first),
  // or only doing this optimization when there were no following arguments that
  // might be inreg.
  //
  // We currently expect it to be rare (particularly in well written code) for
  // arguments to be passed on the stack when there are still free integer
  // registers available (this would typically imply large structs being passed
  // by value), so this seems like a fair tradeoff for now.
  //
  // We can revisit this if the backend grows support for 'onstack' parameter
  // attributes. See PR12193.
  if (freeIntRegs == 0) {
    uint64_t Size = getContext().getTypeSize(Ty);

    // If this type fits in an eightbyte, coerce it into the matching integral
    // type, which will end up on the stack (with alignment 8).
    if (Align == 8 && Size <= 64)
      return ABIArgInfo::getDirect(
          llvm::IntegerType::get(getVMContext(), Size));
  }

  return ABIArgInfo::getIndirect(CharUnits::fromQuantity(Align));
}

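// Illustrative sketch (not from the LLVM sources): once all integer registers
// are taken, an 8-byte-aligned struct that fits in one eightbyte, e.g.
//
//   struct P { char *ptr; };
//
// is coerced to a plain i64 and placed directly on the stack, avoiding the
// extra copy that a byval indirect pass would imply.
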
/// The ABI specifies that a value should be passed in a full vector XMM/YMM
/// register. Pick an LLVM IR type that will be passed as a vector register.
llvm::Type *X86_64ABIInfo::GetByteVectorType(QualType Ty) const {
  // Wrapper structs/arrays that only contain vectors are passed just like
  // vectors; strip them off if present.
  if (const Type *InnerTy = isSingleElementStruct(Ty, getContext()))
    Ty = QualType(InnerTy, 0);

  llvm::Type *IRType = CGT.ConvertType(Ty);
  if (isa<llvm::VectorType>(IRType)) {
    // Don't pass vXi128 vectors in their native type, the backend can't
    // legalize them.
    if (passInt128VectorsInMem() &&
        cast<llvm::VectorType>(IRType)->getElementType()->isIntegerTy(128)) {
      // Use a vXi64 vector.
      uint64_t Size = getContext().getTypeSize(Ty);
      return llvm::FixedVectorType::get(llvm::Type::getInt64Ty(getVMContext()),
                                        Size / 64);
    }

    return IRType;
  }

  if (IRType->getTypeID() == llvm::Type::FP128TyID)
    return IRType;

  // We couldn't find the preferred IR vector type for 'Ty'.
  uint64_t Size = getContext().getTypeSize(Ty);
  assert((Size == 128 || Size == 256 || Size == 512) && "Invalid type found!");

  // Return a LLVM IR vector type based on the size of 'Ty'.
  return llvm::FixedVectorType::get(llvm::Type::getDoubleTy(getVMContext()),
                                    Size / 64);
}

/// BitsContainNoUserData - Return true if the specified [start,end) bit range
/// is known to either be off the end of the specified type or being in
/// alignment padding. The user type specified is known to be at most 128 bits
/// in size, and have passed through X86_64ABIInfo::classify with a successful
/// classification that put one of the two halves in the INTEGER class.
///
/// It is conservatively correct to return false.
static bool BitsContainNoUserData(QualType Ty, unsigned StartBit,
                                  unsigned EndBit, ASTContext &Context) {
  // If the bytes being queried are off the end of the type, there is no user
  // data hiding here. This handles analysis of builtins, vectors and other
  // types that don't contain interesting padding.
  unsigned TySize = (unsigned)Context.getTypeSize(Ty);
  if (TySize <= StartBit)
    return true;

  if (const ConstantArrayType *AT = Context.getAsConstantArrayType(Ty)) {
    unsigned EltSize = (unsigned)Context.getTypeSize(AT->getElementType());
    unsigned NumElts = (unsigned)AT->getSize().getZExtValue();

    // Check each element to see if the element overlaps with the queried range.
    for (unsigned i = 0; i != NumElts; ++i) {
      // If the element is after the span we care about, then we're done.
      unsigned EltOffset = i * EltSize;
      if (EltOffset >= EndBit) break;

      unsigned EltStart = EltOffset < StartBit ? StartBit - EltOffset : 0;
      if (!BitsContainNoUserData(AT->getElementType(), EltStart,
                                 EndBit - EltOffset, Context))
        return false;
    }
    // If it overlaps no elements, then it is safe to process as padding.
    return true;
  }

  if (const RecordType *RT = Ty->getAs<RecordType>()) {
    const RecordDecl *RD = RT->getDecl();
    const ASTRecordLayout &Layout = Context.getASTRecordLayout(RD);

    // If this is a C++ record, check the bases first.
    if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD)) {
      for (const auto &I : CXXRD->bases()) {
        assert(!I.isVirtual() && !I.getType()->isDependentType() &&
               "Unexpected base class!");
        const auto *Base =
            cast<CXXRecordDecl>(I.getType()->castAs<RecordType>()->getDecl());

        // If the base is after the span we care about, ignore it.
        unsigned BaseOffset = Context.toBits(Layout.getBaseClassOffset(Base));
        if (BaseOffset >= EndBit) continue;

        unsigned BaseStart = BaseOffset < StartBit ? StartBit - BaseOffset : 0;
        if (!BitsContainNoUserData(I.getType(), BaseStart,
                                   EndBit - BaseOffset, Context))
          return false;
      }
    }

    // Verify that no field has data that overlaps the region of interest. Yes
    // this could be sped up a lot by being smarter about queried fields,
    // however we're only looking at structs up to 16 bytes, so we don't care
    // much.
    unsigned idx = 0;
    for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
         i != e; ++i, ++idx) {
      unsigned FieldOffset = (unsigned)Layout.getFieldOffset(idx);

      // If we found a field after the region we care about, then we're done.
      if (FieldOffset >= EndBit) break;

      unsigned FieldStart = FieldOffset < StartBit ? StartBit - FieldOffset : 0;
      if (!BitsContainNoUserData(i->getType(), FieldStart,
                                 EndBit - FieldOffset, Context))
        return false;
    }

    // If nothing in this record overlapped the area of interest, then we're
    // clean.
    return true;
  }

  return false;
}

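// Illustrative sketch (not from the LLVM sources): for
//
//   struct S { double d; int i; };   // i ends at bit 96, S is 128 bits
//
// BitsContainNoUserData(S, 96, 128, Ctx) returns true: bits [96,128) are
// tail padding, which is what lets the high eightbyte of S be passed as i32
// instead of i64.
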
/// getFPTypeAtOffset - Return a floating point type at the specified offset.
static llvm::Type *getFPTypeAtOffset(llvm::Type *IRType, unsigned IROffset,
                                     const llvm::DataLayout &TD) {
  if (IROffset == 0 && IRType->isFloatingPointTy())
    return IRType;

  // If this is a struct, recurse into the field at the specified offset.
  if (llvm::StructType *STy = dyn_cast<llvm::StructType>(IRType)) {
    if (!STy->getNumContainedTypes())
      return nullptr;

    const llvm::StructLayout *SL = TD.getStructLayout(STy);
    unsigned Elt = SL->getElementContainingOffset(IROffset);
    IROffset -= SL->getElementOffset(Elt);
    return getFPTypeAtOffset(STy->getElementType(Elt), IROffset, TD);
  }

  // If this is an array, recurse into the field at the specified offset.
  if (llvm::ArrayType *ATy = dyn_cast<llvm::ArrayType>(IRType)) {
    llvm::Type *EltTy = ATy->getElementType();
    unsigned EltSize = TD.getTypeAllocSize(EltTy);
    IROffset -= IROffset / EltSize * EltSize;
    return getFPTypeAtOffset(EltTy, IROffset, TD);
  }

  return nullptr;
}

/// GetSSETypeAtOffset - Return a type that will be passed by the backend in the
/// low 8 bytes of an XMM register, corresponding to the SSE class.
llvm::Type *X86_64ABIInfo::
GetSSETypeAtOffset(llvm::Type *IRType, unsigned IROffset,
                   QualType SourceTy, unsigned SourceOffset) const {
  const llvm::DataLayout &TD = getDataLayout();
  unsigned SourceSize =
      (unsigned)getContext().getTypeSize(SourceTy) / 8 - SourceOffset;
  llvm::Type *T0 = getFPTypeAtOffset(IRType, IROffset, TD);
  if (!T0 || T0->isDoubleTy())
    return llvm::Type::getDoubleTy(getVMContext());

  // Get the adjacent FP type.
  llvm::Type *T1 = nullptr;
  unsigned T0Size = TD.getTypeAllocSize(T0);
  if (SourceSize > T0Size)
    T1 = getFPTypeAtOffset(IRType, IROffset + T0Size, TD);
  if (T1 == nullptr) {
    // Check whether IRType is a half/bfloat followed by a float; the float
    // will be at IROffset+4 due to its alignment.
    if (T0->is16bitFPTy() && SourceSize > 4)
      T1 = getFPTypeAtOffset(IRType, IROffset + 4, TD);
    // If we can't get a second FP type, return a simple half or float.
    // avx512fp16-abi.c:pr51813_2 shows it works to return float for
    // {float, i8} too.
    if (T1 == nullptr)
      return T0;
  }

  if (T0->isFloatTy() && T1->isFloatTy())
    return llvm::FixedVectorType::get(T0, 2);

  if (T0->is16bitFPTy() && T1->is16bitFPTy()) {
    llvm::Type *T2 = nullptr;
    if (SourceSize > 4)
      T2 = getFPTypeAtOffset(IRType, IROffset + 4, TD);
    if (T2 == nullptr)
      return llvm::FixedVectorType::get(T0, 2);
    return llvm::FixedVectorType::get(T0, 4);
  }

  if (T0->is16bitFPTy() || T1->is16bitFPTy())
    return llvm::FixedVectorType::get(llvm::Type::getHalfTy(getVMContext()), 4);

  return llvm::Type::getDoubleTy(getVMContext());
}

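// Illustrative sketch (not from the LLVM sources): for struct { float a, b; }
// this routine finds two adjacent floats and returns <2 x float>, so the pair
// travels in the low 8 bytes of one XMM register; four packed _Float16 fields
// come back as <4 x half> the same way.
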
/// GetINTEGERTypeAtOffset - The ABI specifies that a value should be passed in
/// an 8-byte GPR. This means that we either have a scalar or we are talking
/// about the high or low part of an up-to-16-byte struct. This routine picks
/// the best LLVM IR type to represent this, which may be i64 or may be anything
/// else that the backend will pass in a GPR that works better (e.g. i8, %foo*,
/// etc).
///
/// PrefType is an LLVM IR type that corresponds to (part of) the IR type for
/// the source type. IROffset is an offset in bytes into the LLVM IR type that
/// the 8-byte value references. PrefType may be null.
///
/// SourceTy is the source-level type for the entire argument. SourceOffset is
/// an offset into this that we're processing (which is always either 0 or 8).
///
llvm::Type *X86_64ABIInfo::
GetINTEGERTypeAtOffset(llvm::Type *IRType, unsigned IROffset,
                       QualType SourceTy, unsigned SourceOffset) const {
  // If we're dealing with an un-offset LLVM IR type, then it means that we're
  // returning an 8-byte unit starting with it. See if we can safely use it.
  if (IROffset == 0) {
    // Pointers and int64's always fill the 8-byte unit.
    if ((isa<llvm::PointerType>(IRType) && Has64BitPointers) ||
        IRType->isIntegerTy(64))
      return IRType;

    // If we have a 1/2/4-byte integer, we can use it only if the rest of the
    // goodness in the source type is just tail padding. This is allowed to
    // kick in for struct {double,int} on the int, but not on
    // struct{double,int,int} because we wouldn't return the second int. We
    // have to do this analysis on the source type because we can't depend on
    // unions being lowered a specific way etc.
    if (IRType->isIntegerTy(8) || IRType->isIntegerTy(16) ||
        IRType->isIntegerTy(32) ||
        (isa<llvm::PointerType>(IRType) && !Has64BitPointers)) {
      unsigned BitWidth = isa<llvm::PointerType>(IRType) ? 32 :
          cast<llvm::IntegerType>(IRType)->getBitWidth();

      if (BitsContainNoUserData(SourceTy, SourceOffset * 8 + BitWidth,
                                SourceOffset * 8 + 64, getContext()))
        return IRType;
    }
  }

  if (llvm::StructType *STy = dyn_cast<llvm::StructType>(IRType)) {
    // If this is a struct, recurse into the field at the specified offset.
    const llvm::StructLayout *SL = getDataLayout().getStructLayout(STy);
    if (IROffset < SL->getSizeInBytes()) {
      unsigned FieldIdx = SL->getElementContainingOffset(IROffset);
      IROffset -= SL->getElementOffset(FieldIdx);

      return GetINTEGERTypeAtOffset(STy->getElementType(FieldIdx), IROffset,
                                    SourceTy, SourceOffset);
    }
  }

  if (llvm::ArrayType *ATy = dyn_cast<llvm::ArrayType>(IRType)) {
    llvm::Type *EltTy = ATy->getElementType();
    unsigned EltSize = getDataLayout().getTypeAllocSize(EltTy);
    unsigned EltOffset = IROffset / EltSize * EltSize;
    return GetINTEGERTypeAtOffset(EltTy, IROffset - EltOffset, SourceTy,
                                  SourceOffset);
  }

  // Okay, we don't have any better idea of what to pass, so we pass this in an
  // integer register that isn't too big to fit the rest of the struct.
  unsigned TySizeInBytes =
      (unsigned)getContext().getTypeSizeInChars(SourceTy).getQuantity();

  assert(TySizeInBytes != SourceOffset && "Empty field?");

  // It is always safe to classify this as an integer type up to i64 that
  // isn't larger than the structure.
  return llvm::IntegerType::get(getVMContext(),
                                std::min(TySizeInBytes - SourceOffset, 8U) * 8);
}

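// Illustrative sketch (not from the LLVM sources): for struct { int a, b; }
// at IROffset 0, the leading i32 cannot be used because bits [32,64) hold
// user data (b), so the fallback at the bottom produces i64 and the whole
// struct travels in one GPR. For struct { double d; int i; } at offset 8,
// the tail-padding check succeeds and the high part is passed as i32.
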
/// GetX86_64ByValArgumentPair - Given a high and low type that can ideally
/// be used as elements of a two register pair to pass or return, return a
/// first class aggregate to represent them. For example, if the low part of
/// a by-value argument should be passed as i32* and the high part as float,
/// return {i32*, float}.
static llvm::Type *
GetX86_64ByValArgumentPair(llvm::Type *Lo, llvm::Type *Hi,
                           const llvm::DataLayout &TD) {
  // In order to correctly satisfy the ABI, we need the high part to start
  // at offset 8. If the high and low parts we inferred are both 4-byte types
  // (e.g. i32 and i32) then the resultant struct type ({i32,i32}) won't have
  // the second element at offset 8. Check for this:
  unsigned LoSize = (unsigned)TD.getTypeAllocSize(Lo);
  llvm::Align HiAlign = TD.getABITypeAlign(Hi);
  unsigned HiStart = llvm::alignTo(LoSize, HiAlign);
  assert(HiStart != 0 && HiStart <= 8 && "Invalid x86-64 argument pair!");

  // To handle this, we have to increase the size of the low part so that the
  // second element will start at an 8 byte offset. We can't increase the size
  // of the second element because it might make us access off the end of the
  // struct.
  if (HiStart != 8) {
    // There are usually two sorts of types the ABI generation code can produce
    // for the low part of a pair that aren't 8 bytes in size: half, float or
    // i8/i16/i32. This can also include pointers when they are 32-bit (X32 and
    // NaCl).
    // Promote these to a larger type.
    if (Lo->isHalfTy() || Lo->isFloatTy())
      Lo = llvm::Type::getDoubleTy(Lo->getContext());
    else {
      assert((Lo->isIntegerTy() || Lo->isPointerTy())
             && "Invalid/unknown lo type");
      Lo = llvm::Type::getInt64Ty(Lo->getContext());
    }
  }

  llvm::StructType *Result = llvm::StructType::get(Lo, Hi);

  // Verify that the second element is at an 8-byte offset.
  assert(TD.getStructLayout(Result)->getElementOffset(1) == 8 &&
         "Invalid x86-64 argument pair!");
  return Result;
}

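// Illustrative sketch (not from the LLVM sources): if both halves were
// inferred as i32, the naive {i32, i32} would put the second element at
// offset 4, so the low part is widened to i64 and the function returns
// {i64, i32}, whose second element sits at offset 8 as the ABI requires.
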
ABIArgInfo X86_64ABIInfo::
classifyReturnType(QualType RetTy) const {
  // AMD64-ABI 3.2.3p4: Rule 1. Classify the return type with the
  // classification algorithm.
  X86_64ABIInfo::Class Lo, Hi;
  classify(RetTy, 0, Lo, Hi, /*isNamedArg*/ true);

  // Check some invariants.
  assert((Hi != Memory || Lo == Memory) && "Invalid memory classification.");
  assert((Hi != SSEUp || Lo == SSE) && "Invalid SSEUp classification.");

  llvm::Type *ResType = nullptr;
  switch (Lo) {
  case NoClass:
    if (Hi == NoClass)
      return ABIArgInfo::getIgnore();
    // If the low part is just padding, it takes no register, leave ResType
    // null.
    assert((Hi == SSE || Hi == Integer || Hi == X87Up) &&
           "Unknown missing lo part");
    break;

  case SSEUp:
  case X87Up:
    llvm_unreachable("Invalid classification for lo word.");

  // AMD64-ABI 3.2.3p4: Rule 2. Types of class memory are returned via
  // hidden argument.
  case Memory:
    return getIndirectReturnResult(RetTy);

  // AMD64-ABI 3.2.3p4: Rule 3. If the class is INTEGER, the next
  // available register of the sequence %rax, %rdx is used.
  case Integer:
    ResType = GetINTEGERTypeAtOffset(CGT.ConvertType(RetTy), 0, RetTy, 0);

    // If we have a sign or zero extended integer, make sure to return Extend
    // so that the parameter gets the right LLVM IR attributes.
    if (Hi == NoClass && isa<llvm::IntegerType>(ResType)) {
      // Treat an enum type as its underlying type.
      if (const EnumType *EnumTy = RetTy->getAs<EnumType>())
        RetTy = EnumTy->getDecl()->getIntegerType();

      if (RetTy->isIntegralOrEnumerationType() &&
          isPromotableIntegerTypeForABI(RetTy))
        return ABIArgInfo::getExtend(RetTy);
    }
    break;

  // AMD64-ABI 3.2.3p4: Rule 4. If the class is SSE, the next
  // available SSE register of the sequence %xmm0, %xmm1 is used.
  case SSE:
    ResType = GetSSETypeAtOffset(CGT.ConvertType(RetTy), 0, RetTy, 0);
    break;

  // AMD64-ABI 3.2.3p4: Rule 6. If the class is X87, the value is
  // returned on the X87 stack in %st0 as 80-bit x87 number.
  case X87:
    ResType = llvm::Type::getX86_FP80Ty(getVMContext());
    break;

  // AMD64-ABI 3.2.3p4: Rule 8. If the class is COMPLEX_X87, the real
  // part of the value is returned in %st0 and the imaginary part in
  // %st1.
  case ComplexX87:
    assert(Hi == ComplexX87 && "Unexpected ComplexX87 classification.");
    ResType = llvm::StructType::get(llvm::Type::getX86_FP80Ty(getVMContext()),
                                    llvm::Type::getX86_FP80Ty(getVMContext()));
    break;
  }

  llvm::Type *HighPart = nullptr;
  switch (Hi) {
  // Memory was handled previously and X87 should
  // never occur as a hi class.
  case Memory:
  case X87:
    llvm_unreachable("Invalid classification for hi word.");

  case ComplexX87: // Previously handled.
  case NoClass:
    break;

  case Integer:
    HighPart = GetINTEGERTypeAtOffset(CGT.ConvertType(RetTy), 8, RetTy, 8);
    if (Lo == NoClass) // Return HighPart at offset 8 in memory.
      return ABIArgInfo::getDirect(HighPart, 8);
    break;
  case SSE:
    HighPart = GetSSETypeAtOffset(CGT.ConvertType(RetTy), 8, RetTy, 8);
    if (Lo == NoClass) // Return HighPart at offset 8 in memory.
      return ABIArgInfo::getDirect(HighPart, 8);
    break;

  // AMD64-ABI 3.2.3p4: Rule 5. If the class is SSEUP, the eightbyte
  // is passed in the next available eightbyte chunk of the last used
  // vector register.
  //
  // SSEUP should always be preceded by SSE, just widen.
  case SSEUp:
    assert(Lo == SSE && "Unexpected SSEUp classification.");
    ResType = GetByteVectorType(RetTy);
    break;

  // AMD64-ABI 3.2.3p4: Rule 7. If the class is X87UP, the value is
  // returned together with the previous X87 value in %st0.
  case X87Up:
    // If X87Up is preceded by X87, we don't need to do
    // anything. However, in some cases with unions it may not be
    // preceded by X87. In such situations we follow gcc and pass the
    // extra bits in an SSE reg.
    if (Lo != X87) {
      HighPart = GetSSETypeAtOffset(CGT.ConvertType(RetTy), 8, RetTy, 8);
      if (Lo == NoClass) // Return HighPart at offset 8 in memory.
        return ABIArgInfo::getDirect(HighPart, 8);
    }
    break;
  }

  // If a high part was specified, merge it together with the low part. It is
  // known to pass in the high eightbyte of the result. We do this by forming a
  // first class struct aggregate with the high and low part: {low, high}
  if (HighPart)
    ResType = GetX86_64ByValArgumentPair(ResType, HighPart, getDataLayout());

  return ABIArgInfo::getDirect(ResType);
}

ABIArgInfo
X86_64ABIInfo::classifyArgumentType(QualType Ty, unsigned freeIntRegs,
                                    unsigned &neededInt, unsigned &neededSSE,
                                    bool isNamedArg, bool IsRegCall) const {
  Ty = useFirstFieldIfTransparentUnion(Ty);

  X86_64ABIInfo::Class Lo, Hi;
  classify(Ty, 0, Lo, Hi, isNamedArg, IsRegCall);

  // Check some invariants.
  // FIXME: Enforce these by construction.
  assert((Hi != Memory || Lo == Memory) && "Invalid memory classification.");
  assert((Hi != SSEUp || Lo == SSE) && "Invalid SSEUp classification.");

  neededInt = 0;
  neededSSE = 0;
  llvm::Type *ResType = nullptr;
  switch (Lo) {
  case NoClass:
    if (Hi == NoClass)
      return ABIArgInfo::getIgnore();
    // If the low part is just padding, it takes no register, leave ResType
    // null.
    assert((Hi == SSE || Hi == Integer || Hi == X87Up) &&
           "Unknown missing lo part");
    break;

  // AMD64-ABI 3.2.3p3: Rule 1. If the class is MEMORY, pass the argument
  // on the stack.
  case Memory:

  // AMD64-ABI 3.2.3p3: Rule 5. If the class is X87, X87UP or
  // COMPLEX_X87, it is passed in memory.
  case X87:
  case ComplexX87:
    if (getRecordArgABI(Ty, getCXXABI()) == CGCXXABI::RAA_Indirect)
      ++neededInt;
    return getIndirectResult(Ty, freeIntRegs);

  case SSEUp:
  case X87Up:
    llvm_unreachable("Invalid classification for lo word.");

  // AMD64-ABI 3.2.3p3: Rule 2. If the class is INTEGER, the next
  // available register of the sequence %rdi, %rsi, %rdx, %rcx, %r8
  // and %r9 is used.
  case Integer:
    ++neededInt;

    // Pick an 8-byte type based on the preferred type.
    ResType = GetINTEGERTypeAtOffset(CGT.ConvertType(Ty), 0, Ty, 0);

    // If we have a sign or zero extended integer, make sure to return Extend
    // so that the parameter gets the right LLVM IR attributes.
    if (Hi == NoClass && isa<llvm::IntegerType>(ResType)) {
      // Treat an enum type as its underlying type.
      if (const EnumType *EnumTy = Ty->getAs<EnumType>())
        Ty = EnumTy->getDecl()->getIntegerType();

      if (Ty->isIntegralOrEnumerationType() &&
          isPromotableIntegerTypeForABI(Ty))
        return ABIArgInfo::getExtend(Ty);
    }

    break;

  // AMD64-ABI 3.2.3p3: Rule 3. If the class is SSE, the next
  // available SSE register is used, the registers are taken in the
  // order from %xmm0 to %xmm7.
  case SSE: {
    llvm::Type *IRType = CGT.ConvertType(Ty);
    ResType = GetSSETypeAtOffset(IRType, 0, Ty, 0);
    ++neededSSE;
    break;
  }
  }

  llvm::Type *HighPart = nullptr;
  switch (Hi) {
  // Memory was handled previously, ComplexX87 and X87 should
  // never occur as hi classes, and X87Up must be preceded by X87,
  // which is passed in memory.
  case Memory:
  case X87:
  case ComplexX87:
    llvm_unreachable("Invalid classification for hi word.");

  case NoClass: break;

  case Integer:
    ++neededInt;
    // Pick an 8-byte type based on the preferred type.
    HighPart = GetINTEGERTypeAtOffset(CGT.ConvertType(Ty), 8, Ty, 8);

    if (Lo == NoClass) // Pass HighPart at offset 8 in memory.
      return ABIArgInfo::getDirect(HighPart, 8);
    break;

  // X87Up generally doesn't occur here (long double is passed in
  // memory), except in situations involving unions.
  case X87Up:
  case SSE:
    HighPart = GetSSETypeAtOffset(CGT.ConvertType(Ty), 8, Ty, 8);

    if (Lo == NoClass) // Pass HighPart at offset 8 in memory.
      return ABIArgInfo::getDirect(HighPart, 8);

    ++neededSSE;
    break;

  // AMD64-ABI 3.2.3p3: Rule 4. If the class is SSEUP, the
  // eightbyte is passed in the upper half of the last used SSE
  // register. This only happens when 128-bit vectors are passed.
  case SSEUp:
    assert(Lo == SSE && "Unexpected SSEUp classification");
    ResType = GetByteVectorType(Ty);
    break;
  }

  // If a high part was specified, merge it together with the low part. It is
  // known to pass in the high eightbyte of the result. We do this by forming a
  // first class struct aggregate with the high and low part: {low, high}
  if (HighPart)
    ResType = GetX86_64ByValArgumentPair(ResType, HighPart, getDataLayout());

  return ABIArgInfo::getDirect(ResType);
}

2817 ABIArgInfo
classifyRegCallStructTypeImpl(QualType Ty,unsigned & NeededInt,unsigned & NeededSSE,unsigned & MaxVectorWidth) const2818 X86_64ABIInfo::classifyRegCallStructTypeImpl(QualType Ty, unsigned &NeededInt,
2819 unsigned &NeededSSE,
2820 unsigned &MaxVectorWidth) const {
2821 auto RT = Ty->getAs<RecordType>();
2822 assert(RT && "classifyRegCallStructType only valid with struct types");
2823
2824 if (RT->getDecl()->hasFlexibleArrayMember())
2825 return getIndirectReturnResult(Ty);
2826
2827 // Sum up bases
2828 if (auto CXXRD = dyn_cast<CXXRecordDecl>(RT->getDecl())) {
2829 if (CXXRD->isDynamicClass()) {
2830 NeededInt = NeededSSE = 0;
2831 return getIndirectReturnResult(Ty);
2832 }
2833
2834 for (const auto &I : CXXRD->bases())
2835 if (classifyRegCallStructTypeImpl(I.getType(), NeededInt, NeededSSE,
2836 MaxVectorWidth)
2837 .isIndirect()) {
2838 NeededInt = NeededSSE = 0;
2839 return getIndirectReturnResult(Ty);
2840 }
2841 }
2842
2843 // Sum up members
2844 for (const auto *FD : RT->getDecl()->fields()) {
2845 QualType MTy = FD->getType();
2846 if (MTy->isRecordType() && !MTy->isUnionType()) {
2847 if (classifyRegCallStructTypeImpl(MTy, NeededInt, NeededSSE,
2848 MaxVectorWidth)
2849 .isIndirect()) {
2850 NeededInt = NeededSSE = 0;
2851 return getIndirectReturnResult(Ty);
2852 }
2853 } else {
2854 unsigned LocalNeededInt, LocalNeededSSE;
2855 if (classifyArgumentType(MTy, UINT_MAX, LocalNeededInt, LocalNeededSSE,
2856 true, true)
2857 .isIndirect()) {
2858 NeededInt = NeededSSE = 0;
2859 return getIndirectReturnResult(Ty);
2860 }
2861 if (const auto *AT = getContext().getAsConstantArrayType(MTy))
2862 MTy = AT->getElementType();
2863 if (const auto *VT = MTy->getAs<VectorType>())
2864 if (getContext().getTypeSize(VT) > MaxVectorWidth)
2865 MaxVectorWidth = getContext().getTypeSize(VT);
2866 NeededInt += LocalNeededInt;
2867 NeededSSE += LocalNeededSSE;
2868 }
2869 }
2870
2871 return ABIArgInfo::getDirect();
2872 }
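
// Illustrative sketch (an assumed example, not code used above): for a
// __regcall function taking
//   struct P { double x, y; };
// the field walk above accumulates NeededInt = 0 and NeededSSE = 2, so the
// struct is passed directly in two XMM registers when enough are free.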

ABIArgInfo
X86_64ABIInfo::classifyRegCallStructType(QualType Ty, unsigned &NeededInt,
                                         unsigned &NeededSSE,
                                         unsigned &MaxVectorWidth) const {

  NeededInt = 0;
  NeededSSE = 0;
  MaxVectorWidth = 0;

  return classifyRegCallStructTypeImpl(Ty, NeededInt, NeededSSE,
                                       MaxVectorWidth);
}

void X86_64ABIInfo::computeInfo(CGFunctionInfo &FI) const {

  const unsigned CallingConv = FI.getCallingConvention();
  // It is possible to force the Win64 calling convention on any x86_64 target
  // with __attribute__((ms_abi)). In that case, delegate to
  // WinX86_64ABIInfo::computeInfo so that Win64-compatible code is emitted.
  if (CallingConv == llvm::CallingConv::Win64) {
    WinX86_64ABIInfo Win64ABIInfo(CGT, AVXLevel);
    Win64ABIInfo.computeInfo(FI);
    return;
  }

  bool IsRegCall = CallingConv == llvm::CallingConv::X86_RegCall;

  // Keep track of the number of assigned registers.
  unsigned FreeIntRegs = IsRegCall ? 11 : 6;
  unsigned FreeSSERegs = IsRegCall ? 16 : 8;
  unsigned NeededInt = 0, NeededSSE = 0, MaxVectorWidth = 0;

  if (!::classifyReturnType(getCXXABI(), FI, *this)) {
    if (IsRegCall && FI.getReturnType()->getTypePtr()->isRecordType() &&
        !FI.getReturnType()->getTypePtr()->isUnionType()) {
      FI.getReturnInfo() = classifyRegCallStructType(
          FI.getReturnType(), NeededInt, NeededSSE, MaxVectorWidth);
      if (FreeIntRegs >= NeededInt && FreeSSERegs >= NeededSSE) {
        FreeIntRegs -= NeededInt;
        FreeSSERegs -= NeededSSE;
      } else {
        FI.getReturnInfo() = getIndirectReturnResult(FI.getReturnType());
      }
    } else if (IsRegCall && FI.getReturnType()->getAs<ComplexType>() &&
               getContext().getCanonicalType(FI.getReturnType()
                                                 ->getAs<ComplexType>()
                                                 ->getElementType()) ==
                   getContext().LongDoubleTy)
      // A complex long double is returned in memory under the regcall
      // calling convention.
      FI.getReturnInfo() = getIndirectReturnResult(FI.getReturnType());
    else
      FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
  }

  // If the return value is indirect, then the hidden argument is consuming one
  // integer register.
  if (FI.getReturnInfo().isIndirect())
    --FreeIntRegs;
  else if (NeededSSE && MaxVectorWidth > 0)
    FI.setMaxVectorWidth(MaxVectorWidth);

  // The chain argument effectively gives us another free register.
  if (FI.isChainCall())
    ++FreeIntRegs;

  unsigned NumRequiredArgs = FI.getNumRequiredArgs();
  // AMD64-ABI 3.2.3p3: Once arguments are classified, the registers
  // get assigned (in left-to-right order) for passing as follows...
  unsigned ArgNo = 0;
  for (CGFunctionInfo::arg_iterator it = FI.arg_begin(), ie = FI.arg_end();
       it != ie; ++it, ++ArgNo) {
    bool IsNamedArg = ArgNo < NumRequiredArgs;

    if (IsRegCall && it->type->isStructureOrClassType())
      it->info = classifyRegCallStructType(it->type, NeededInt, NeededSSE,
                                           MaxVectorWidth);
    else
      it->info = classifyArgumentType(it->type, FreeIntRegs, NeededInt,
                                      NeededSSE, IsNamedArg);

    // AMD64-ABI 3.2.3p3: If there are no registers available for any
    // eightbyte of an argument, the whole argument is passed on the
    // stack. If registers have already been assigned for some
    // eightbytes of such an argument, the assignments get reverted.
    if (FreeIntRegs >= NeededInt && FreeSSERegs >= NeededSSE) {
      FreeIntRegs -= NeededInt;
      FreeSSERegs -= NeededSSE;
      if (MaxVectorWidth > FI.getMaxVectorWidth())
        FI.setMaxVectorWidth(MaxVectorWidth);
    } else {
      it->info = getIndirectResult(it->type, FreeIntRegs);
    }
  }
}
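
// Illustrative sketch (an assumed example, not code used above): for a SysV
// function taking seven int parameters, the first six consume %rdi..%r9
// (FreeIntRegs drops from 6 to 0); the seventh still classifies as Integer,
// but NeededInt then exceeds FreeIntRegs, so its info is replaced via
// getIndirectResult and the argument ends up on the stack.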

static Address EmitX86_64VAArgFromMemory(CodeGenFunction &CGF,
                                         Address VAListAddr, QualType Ty) {
  Address overflow_arg_area_p =
      CGF.Builder.CreateStructGEP(VAListAddr, 2, "overflow_arg_area_p");
  llvm::Value *overflow_arg_area =
      CGF.Builder.CreateLoad(overflow_arg_area_p, "overflow_arg_area");

  // AMD64-ABI 3.5.7p5: Step 7. Align l->overflow_arg_area upwards to a 16
  // byte boundary if alignment needed by type exceeds 8 byte boundary.
  // It isn't stated explicitly in the standard, but in practice we use
  // alignment greater than 16 where necessary.
  CharUnits Align = CGF.getContext().getTypeAlignInChars(Ty);
  if (Align > CharUnits::fromQuantity(8)) {
    overflow_arg_area = emitRoundPointerUpToAlignment(CGF, overflow_arg_area,
                                                      Align);
  }

  // AMD64-ABI 3.5.7p5: Step 8. Fetch type from l->overflow_arg_area.
  llvm::Type *LTy = CGF.ConvertTypeForMem(Ty);
  llvm::Value *Res = overflow_arg_area;

  // AMD64-ABI 3.5.7p5: Step 9. Set l->overflow_arg_area to:
  // l->overflow_arg_area + sizeof(type).
  // AMD64-ABI 3.5.7p5: Step 10. Align l->overflow_arg_area upwards to
  // an 8 byte boundary.

  uint64_t SizeInBytes = (CGF.getContext().getTypeSize(Ty) + 7) / 8;
  llvm::Value *Offset =
      llvm::ConstantInt::get(CGF.Int32Ty, (SizeInBytes + 7) & ~7);
  overflow_arg_area = CGF.Builder.CreateGEP(CGF.Int8Ty, overflow_arg_area,
                                            Offset, "overflow_arg_area.next");
  CGF.Builder.CreateStore(overflow_arg_area, overflow_arg_area_p);

  // AMD64-ABI 3.5.7p5: Step 11. Return the fetched type.
  return Address(Res, LTy, Align);
}
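
// Illustrative sketch of the pointer arithmetic above (an assumed example):
// for a 12-byte struct, getTypeSize returns 96 bits, so SizeInBytes is 12 and
// the stored increment is (12 + 7) & ~7 = 16; overflow_arg_area therefore
// advances by two full eightbytes, keeping the next slot 8-byte aligned.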

Address X86_64ABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
                                 QualType Ty) const {
  // Assume that va_list type is correct; should be pointer to LLVM type:
  // struct {
  //   i32 gp_offset;
  //   i32 fp_offset;
  //   i8* overflow_arg_area;
  //   i8* reg_save_area;
  // };
  unsigned neededInt, neededSSE;

  Ty = getContext().getCanonicalType(Ty);
  ABIArgInfo AI = classifyArgumentType(Ty, 0, neededInt, neededSSE,
                                       /*isNamedArg*/false);

  // AMD64-ABI 3.5.7p5: Step 1. Determine whether type may be passed
  // in the registers. If not go to step 7.
  if (!neededInt && !neededSSE)
    return EmitX86_64VAArgFromMemory(CGF, VAListAddr, Ty);

  // AMD64-ABI 3.5.7p5: Step 2. Compute num_gp to hold the number of
  // general purpose registers needed to pass type and num_fp to hold
  // the number of floating point registers needed.

  // AMD64-ABI 3.5.7p5: Step 3. Verify whether arguments fit into
  // registers. In the case: l->gp_offset > 48 - num_gp * 8 or
  // l->fp_offset > 304 - num_fp * 16 go to step 7.
  //
  // NOTE: 304 is a typo in the spec; there are only
  // (6 * 8 + 8 * 16) = 176 bytes of register save space.

  llvm::Value *InRegs = nullptr;
  Address gp_offset_p = Address::invalid(), fp_offset_p = Address::invalid();
  llvm::Value *gp_offset = nullptr, *fp_offset = nullptr;
  if (neededInt) {
    gp_offset_p = CGF.Builder.CreateStructGEP(VAListAddr, 0, "gp_offset_p");
    gp_offset = CGF.Builder.CreateLoad(gp_offset_p, "gp_offset");
    InRegs = llvm::ConstantInt::get(CGF.Int32Ty, 48 - neededInt * 8);
    InRegs = CGF.Builder.CreateICmpULE(gp_offset, InRegs, "fits_in_gp");
  }

  if (neededSSE) {
    fp_offset_p = CGF.Builder.CreateStructGEP(VAListAddr, 1, "fp_offset_p");
    fp_offset = CGF.Builder.CreateLoad(fp_offset_p, "fp_offset");
    llvm::Value *FitsInFP =
        llvm::ConstantInt::get(CGF.Int32Ty, 176 - neededSSE * 16);
    FitsInFP = CGF.Builder.CreateICmpULE(fp_offset, FitsInFP, "fits_in_fp");
    InRegs = InRegs ? CGF.Builder.CreateAnd(InRegs, FitsInFP) : FitsInFP;
  }

  llvm::BasicBlock *InRegBlock = CGF.createBasicBlock("vaarg.in_reg");
  llvm::BasicBlock *InMemBlock = CGF.createBasicBlock("vaarg.in_mem");
  llvm::BasicBlock *ContBlock = CGF.createBasicBlock("vaarg.end");
  CGF.Builder.CreateCondBr(InRegs, InRegBlock, InMemBlock);

  // Emit code to load the value if it was passed in registers.

  CGF.EmitBlock(InRegBlock);

  // AMD64-ABI 3.5.7p5: Step 4. Fetch type from l->reg_save_area with
  // an offset of l->gp_offset and/or l->fp_offset. This may require
  // copying to a temporary location in case the parameter is passed
  // in different register classes or requires an alignment greater
  // than 8 for general purpose registers and 16 for XMM registers.
  //
  // FIXME: This really results in shameful code when we end up needing to
  // collect arguments from different places; often what should result in a
  // simple assembling of a structure from scattered addresses has many more
  // loads than necessary. Can we clean this up?
  llvm::Type *LTy = CGF.ConvertTypeForMem(Ty);
  llvm::Value *RegSaveArea = CGF.Builder.CreateLoad(
      CGF.Builder.CreateStructGEP(VAListAddr, 3), "reg_save_area");

  Address RegAddr = Address::invalid();
  if (neededInt && neededSSE) {
    // FIXME: Cleanup.
    assert(AI.isDirect() && "Unexpected ABI info for mixed regs");
    llvm::StructType *ST = cast<llvm::StructType>(AI.getCoerceToType());
    Address Tmp = CGF.CreateMemTemp(Ty);
    Tmp = Tmp.withElementType(ST);
    assert(ST->getNumElements() == 2 && "Unexpected ABI info for mixed regs");
    llvm::Type *TyLo = ST->getElementType(0);
    llvm::Type *TyHi = ST->getElementType(1);
    assert((TyLo->isFPOrFPVectorTy() ^ TyHi->isFPOrFPVectorTy()) &&
           "Unexpected ABI info for mixed regs");
    llvm::Value *GPAddr =
        CGF.Builder.CreateGEP(CGF.Int8Ty, RegSaveArea, gp_offset);
    llvm::Value *FPAddr =
        CGF.Builder.CreateGEP(CGF.Int8Ty, RegSaveArea, fp_offset);
    llvm::Value *RegLoAddr = TyLo->isFPOrFPVectorTy() ? FPAddr : GPAddr;
    llvm::Value *RegHiAddr = TyLo->isFPOrFPVectorTy() ? GPAddr : FPAddr;

    // Copy the first element.
    // FIXME: Our choice of alignment here and below is probably pessimistic.
    llvm::Value *V = CGF.Builder.CreateAlignedLoad(
        TyLo, RegLoAddr,
        CharUnits::fromQuantity(getDataLayout().getABITypeAlign(TyLo)));
    CGF.Builder.CreateStore(V, CGF.Builder.CreateStructGEP(Tmp, 0));

    // Copy the second element.
    V = CGF.Builder.CreateAlignedLoad(
        TyHi, RegHiAddr,
        CharUnits::fromQuantity(getDataLayout().getABITypeAlign(TyHi)));
    CGF.Builder.CreateStore(V, CGF.Builder.CreateStructGEP(Tmp, 1));

    RegAddr = Tmp.withElementType(LTy);
  } else if (neededInt) {
    RegAddr = Address(CGF.Builder.CreateGEP(CGF.Int8Ty, RegSaveArea, gp_offset),
                      LTy, CharUnits::fromQuantity(8));

    // Copy to a temporary if necessary to ensure the appropriate alignment.
    auto TInfo = getContext().getTypeInfoInChars(Ty);
    uint64_t TySize = TInfo.Width.getQuantity();
    CharUnits TyAlign = TInfo.Align;

    // Copy into a temporary if the type is more aligned than the
    // register save area.
    if (TyAlign.getQuantity() > 8) {
      Address Tmp = CGF.CreateMemTemp(Ty);
      CGF.Builder.CreateMemCpy(Tmp, RegAddr, TySize, false);
      RegAddr = Tmp;
    }

  } else if (neededSSE == 1) {
    RegAddr = Address(CGF.Builder.CreateGEP(CGF.Int8Ty, RegSaveArea, fp_offset),
                      LTy, CharUnits::fromQuantity(16));
  } else {
    assert(neededSSE == 2 && "Invalid number of needed registers!");
    // SSE registers are spaced 16 bytes apart in the register save
    // area, so we need to collect the two eightbytes together.
    // The ABI isn't explicit about this, but it seems reasonable
    // to assume that the slots are 16-byte aligned, since the stack is
    // naturally 16-byte aligned and the prologue is expected to store
    // all the SSE registers to the RSA.
    Address RegAddrLo = Address(CGF.Builder.CreateGEP(CGF.Int8Ty, RegSaveArea,
                                                      fp_offset),
                                CGF.Int8Ty, CharUnits::fromQuantity(16));
    Address RegAddrHi =
        CGF.Builder.CreateConstInBoundsByteGEP(RegAddrLo,
                                               CharUnits::fromQuantity(16));
    llvm::Type *ST = AI.canHaveCoerceToType()
                         ? AI.getCoerceToType()
                         : llvm::StructType::get(CGF.DoubleTy, CGF.DoubleTy);
    llvm::Value *V;
    Address Tmp = CGF.CreateMemTemp(Ty);
    Tmp = Tmp.withElementType(ST);
    V = CGF.Builder.CreateLoad(
        RegAddrLo.withElementType(ST->getStructElementType(0)));
    CGF.Builder.CreateStore(V, CGF.Builder.CreateStructGEP(Tmp, 0));
    V = CGF.Builder.CreateLoad(
        RegAddrHi.withElementType(ST->getStructElementType(1)));
    CGF.Builder.CreateStore(V, CGF.Builder.CreateStructGEP(Tmp, 1));

    RegAddr = Tmp.withElementType(LTy);
  }

  // AMD64-ABI 3.5.7p5: Step 5. Set:
  // l->gp_offset = l->gp_offset + num_gp * 8
  // l->fp_offset = l->fp_offset + num_fp * 16.
  if (neededInt) {
    llvm::Value *Offset = llvm::ConstantInt::get(CGF.Int32Ty, neededInt * 8);
    CGF.Builder.CreateStore(CGF.Builder.CreateAdd(gp_offset, Offset),
                            gp_offset_p);
  }
  if (neededSSE) {
    llvm::Value *Offset = llvm::ConstantInt::get(CGF.Int32Ty, neededSSE * 16);
    CGF.Builder.CreateStore(CGF.Builder.CreateAdd(fp_offset, Offset),
                            fp_offset_p);
  }
  CGF.EmitBranch(ContBlock);

  // Emit code to load the value if it was passed in memory.

  CGF.EmitBlock(InMemBlock);
  Address MemAddr = EmitX86_64VAArgFromMemory(CGF, VAListAddr, Ty);

  // Return the appropriate result.

  CGF.EmitBlock(ContBlock);
  Address ResAddr = emitMergePHI(CGF, RegAddr, InRegBlock, MemAddr, InMemBlock,
                                 "vaarg.addr");
  return ResAddr;
}
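
// Illustrative sketch of the IR shape emitted above for va_arg of a double
// (%reg and %mem are assumed names for readability; the real value names come
// from the builder calls):
//   %fp_offset = load i32, ptr %fp_offset_p
//   %fits_in_fp = icmp ule i32 %fp_offset, 160        ; 176 - 1 * 16
//   br i1 %fits_in_fp, label %vaarg.in_reg, label %vaarg.in_mem
//   ...
//   %vaarg.addr = phi ptr [ %reg, %vaarg.in_reg ], [ %mem, %vaarg.in_mem ]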

Address X86_64ABIInfo::EmitMSVAArg(CodeGenFunction &CGF, Address VAListAddr,
                                   QualType Ty) const {
  // MS x64 ABI requirement: "Any argument that doesn't fit in 8 bytes, or is
  // not 1, 2, 4, or 8 bytes, must be passed by reference."
  uint64_t Width = getContext().getTypeSize(Ty);
  bool IsIndirect = Width > 64 || !llvm::isPowerOf2_64(Width);

  return emitVoidPtrVAArg(CGF, VAListAddr, Ty, IsIndirect,
                          CGF.getContext().getTypeInfoInChars(Ty),
                          CharUnits::fromQuantity(8),
                          /*allowHigherAlign*/ false);
}
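
// Illustrative sketch (an assumed example, not code used above): under the
// MS rule, a 16-byte struct has Width = 128, so IsIndirect is true and
// va_arg reads a pointer out of the 8-byte va_list slot; a plain int
// (32 bits, a power of two) is read directly from the slot instead.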

ABIArgInfo WinX86_64ABIInfo::reclassifyHvaArgForVectorCall(
    QualType Ty, unsigned &FreeSSERegs, const ABIArgInfo &current) const {
  const Type *Base = nullptr;
  uint64_t NumElts = 0;

  if (!Ty->isBuiltinType() && !Ty->isVectorType() &&
      isHomogeneousAggregate(Ty, Base, NumElts) && FreeSSERegs >= NumElts) {
    FreeSSERegs -= NumElts;
    return getDirectX86Hva();
  }
  return current;
}

ABIArgInfo WinX86_64ABIInfo::classify(QualType Ty, unsigned &FreeSSERegs,
                                      bool IsReturnType, bool IsVectorCall,
                                      bool IsRegCall) const {

  if (Ty->isVoidType())
    return ABIArgInfo::getIgnore();

  if (const EnumType *EnumTy = Ty->getAs<EnumType>())
    Ty = EnumTy->getDecl()->getIntegerType();

  TypeInfo Info = getContext().getTypeInfo(Ty);
  uint64_t Width = Info.Width;
  CharUnits Align = getContext().toCharUnitsFromBits(Info.Align);

  const RecordType *RT = Ty->getAs<RecordType>();
  if (RT) {
    if (!IsReturnType) {
      if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(RT, getCXXABI()))
        return getNaturalAlignIndirect(Ty, RAA == CGCXXABI::RAA_DirectInMemory);
    }

    if (RT->getDecl()->hasFlexibleArrayMember())
      return getNaturalAlignIndirect(Ty, /*ByVal=*/false);

  }

  const Type *Base = nullptr;
  uint64_t NumElts = 0;
  // vectorcall adds the concept of a homogeneous vector aggregate, similar to
  // other targets.
  if ((IsVectorCall || IsRegCall) &&
      isHomogeneousAggregate(Ty, Base, NumElts)) {
    if (IsRegCall) {
      if (FreeSSERegs >= NumElts) {
        FreeSSERegs -= NumElts;
        if (IsReturnType || Ty->isBuiltinType() || Ty->isVectorType())
          return ABIArgInfo::getDirect();
        return ABIArgInfo::getExpand();
      }
      return ABIArgInfo::getIndirect(Align, /*ByVal=*/false);
    } else if (IsVectorCall) {
      if (FreeSSERegs >= NumElts &&
          (IsReturnType || Ty->isBuiltinType() || Ty->isVectorType())) {
        FreeSSERegs -= NumElts;
        return ABIArgInfo::getDirect();
      } else if (IsReturnType) {
        return ABIArgInfo::getExpand();
      } else if (!Ty->isBuiltinType() && !Ty->isVectorType()) {
        // HVAs are delayed and reclassified in the 2nd step.
        return ABIArgInfo::getIndirect(Align, /*ByVal=*/false);
      }
    }
  }

  if (Ty->isMemberPointerType()) {
    // If the member pointer is represented by an LLVM int or ptr, pass it
    // directly.
    llvm::Type *LLTy = CGT.ConvertType(Ty);
    if (LLTy->isPointerTy() || LLTy->isIntegerTy())
      return ABIArgInfo::getDirect();
  }

  if (RT || Ty->isAnyComplexType() || Ty->isMemberPointerType()) {
    // MS x64 ABI requirement: "Any argument that doesn't fit in 8 bytes, or is
    // not 1, 2, 4, or 8 bytes, must be passed by reference."
    if (Width > 64 || !llvm::isPowerOf2_64(Width))
      return getNaturalAlignIndirect(Ty, /*ByVal=*/false);

    // Otherwise, coerce it to a small integer.
    return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(), Width));
  }

  if (const BuiltinType *BT = Ty->getAs<BuiltinType>()) {
    switch (BT->getKind()) {
    case BuiltinType::Bool:
      // Bool is the only builtin type that the ABI requires to be extended;
      // the other builtin types are passed as-is.
      return ABIArgInfo::getExtend(Ty);

    case BuiltinType::LongDouble:
      // Mingw64 GCC uses the old 80 bit extended precision floating point
      // unit. It passes them indirectly through memory.
      if (IsMingw64) {
        const llvm::fltSemantics *LDF = &getTarget().getLongDoubleFormat();
        if (LDF == &llvm::APFloat::x87DoubleExtended())
          return ABIArgInfo::getIndirect(Align, /*ByVal=*/false);
      }
      break;

    case BuiltinType::Int128:
    case BuiltinType::UInt128:
      // If it's a parameter type, the normal ABI rule is that arguments larger
      // than 8 bytes are passed indirectly. GCC follows it. We follow it too,
      // even though it isn't particularly efficient.
      if (!IsReturnType)
        return ABIArgInfo::getIndirect(Align, /*ByVal=*/false);

      // Mingw64 GCC returns i128 in XMM0. Coerce to v2i64 to handle that;
      // Clang matches it for compatibility.
      return ABIArgInfo::getDirect(llvm::FixedVectorType::get(
          llvm::Type::getInt64Ty(getVMContext()), 2));

    default:
      break;
    }
  }

  if (Ty->isBitIntType()) {
    // MS x64 ABI requirement: "Any argument that doesn't fit in 8 bytes, or is
    // not 1, 2, 4, or 8 bytes, must be passed by reference."
    // However, non-power-of-two bit-precise integers will be passed as 1, 2,
    // 4, or 8 bytes anyway as long as they fit in those widths, so we don't
    // have to check for a power of 2.
    if (Width <= 64)
      return ABIArgInfo::getDirect();
    return ABIArgInfo::getIndirect(Align, /*ByVal=*/false);
  }

  return ABIArgInfo::getDirect();
}
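
// Illustrative sketch (an assumed example, not code used above): on Win64,
//   struct A { int a, b; };        // 8 bytes, power of two
// is coerced to i64 and passed in a GPR, while
//   struct B { int a, b, c; };     // 12 bytes
// fails the power-of-two check and is passed by reference
// (indirect, ByVal = false).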

void WinX86_64ABIInfo::computeInfo(CGFunctionInfo &FI) const {
  const unsigned CC = FI.getCallingConvention();
  bool IsVectorCall = CC == llvm::CallingConv::X86_VectorCall;
  bool IsRegCall = CC == llvm::CallingConv::X86_RegCall;

  // If __attribute__((sysv_abi)) is in use, use the SysV argument
  // classification rules.
  if (CC == llvm::CallingConv::X86_64_SysV) {
    X86_64ABIInfo SysVABIInfo(CGT, AVXLevel);
    SysVABIInfo.computeInfo(FI);
    return;
  }

  unsigned FreeSSERegs = 0;
  if (IsVectorCall) {
    // We can use up to 4 SSE return registers with vectorcall.
    FreeSSERegs = 4;
  } else if (IsRegCall) {
    // RegCall gives us 16 SSE registers.
    FreeSSERegs = 16;
  }

  if (!getCXXABI().classifyReturnType(FI))
    FI.getReturnInfo() = classify(FI.getReturnType(), FreeSSERegs, true,
                                  IsVectorCall, IsRegCall);

  if (IsVectorCall) {
    // We can use up to 6 SSE register parameters with vectorcall.
    FreeSSERegs = 6;
  } else if (IsRegCall) {
    // RegCall gives us 16 SSE registers, so we can reuse the return registers.
    FreeSSERegs = 16;
  }

  unsigned ArgNum = 0;
  unsigned ZeroSSERegs = 0;
  for (auto &I : FI.arguments()) {
    // Vectorcall on x64 only permits the first 6 arguments to be passed in
    // XMM/YMM registers. After the sixth argument, pretend no vector
    // registers are left.
    unsigned *MaybeFreeSSERegs =
        (IsVectorCall && ArgNum >= 6) ? &ZeroSSERegs : &FreeSSERegs;
    I.info =
        classify(I.type, *MaybeFreeSSERegs, false, IsVectorCall, IsRegCall);
    ++ArgNum;
  }

  if (IsVectorCall) {
    // For vectorcall, assign aggregate HVAs to any free vector registers in a
    // second pass.
    for (auto &I : FI.arguments())
      I.info = reclassifyHvaArgForVectorCall(I.type, FreeSSERegs, I.info);
  }
}
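
// Illustrative sketch of the two-pass vectorcall scheme above (an assumed
// declaration, not code used here):
//   struct Hva { double d[4]; };
//   void __vectorcall f(struct Hva h, double x);
// The first pass leaves the HVA indirect (delayed) and gives x one of the
// six SSE registers; the second pass then finds four registers still free
// and reclassifies h as a direct, non-flattened HVA via
// reclassifyHvaArgForVectorCall.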

Address WinX86_64ABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
                                    QualType Ty) const {
  // MS x64 ABI requirement: "Any argument that doesn't fit in 8 bytes, or is
  // not 1, 2, 4, or 8 bytes, must be passed by reference."
  uint64_t Width = getContext().getTypeSize(Ty);
  bool IsIndirect = Width > 64 || !llvm::isPowerOf2_64(Width);

  return emitVoidPtrVAArg(CGF, VAListAddr, Ty, IsIndirect,
                          CGF.getContext().getTypeInfoInChars(Ty),
                          CharUnits::fromQuantity(8),
                          /*allowHigherAlign*/ false);
}

std::unique_ptr<TargetCodeGenInfo> CodeGen::createX86_32TargetCodeGenInfo(
    CodeGenModule &CGM, bool DarwinVectorABI, bool Win32StructABI,
    unsigned NumRegisterParameters, bool SoftFloatABI) {
  bool RetSmallStructInRegABI = X86_32TargetCodeGenInfo::isStructReturnInRegABI(
      CGM.getTriple(), CGM.getCodeGenOpts());
  return std::make_unique<X86_32TargetCodeGenInfo>(
      CGM.getTypes(), DarwinVectorABI, RetSmallStructInRegABI, Win32StructABI,
      NumRegisterParameters, SoftFloatABI);
}

std::unique_ptr<TargetCodeGenInfo> CodeGen::createWinX86_32TargetCodeGenInfo(
    CodeGenModule &CGM, bool DarwinVectorABI, bool Win32StructABI,
    unsigned NumRegisterParameters) {
  bool RetSmallStructInRegABI = X86_32TargetCodeGenInfo::isStructReturnInRegABI(
      CGM.getTriple(), CGM.getCodeGenOpts());
  return std::make_unique<WinX86_32TargetCodeGenInfo>(
      CGM.getTypes(), DarwinVectorABI, RetSmallStructInRegABI, Win32StructABI,
      NumRegisterParameters);
}

std::unique_ptr<TargetCodeGenInfo>
CodeGen::createX86_64TargetCodeGenInfo(CodeGenModule &CGM,
                                       X86AVXABILevel AVXLevel) {
  return std::make_unique<X86_64TargetCodeGenInfo>(CGM.getTypes(), AVXLevel);
}

std::unique_ptr<TargetCodeGenInfo>
CodeGen::createWinX86_64TargetCodeGenInfo(CodeGenModule &CGM,
                                          X86AVXABILevel AVXLevel) {
  return std::make_unique<WinX86_64TargetCodeGenInfo>(CGM.getTypes(), AVXLevel);
}