//===---- TargetInfo.cpp - Encapsulate target details -----------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// These classes wrap the information about a call or function
// definition used to handle ABI compliance.
//
//===----------------------------------------------------------------------===//

#include "TargetInfo.h"
#include "ABIInfo.h"
#include "CGBlocks.h"
#include "CGCXXABI.h"
#include "CGValue.h"
#include "CodeGenFunction.h"
#include "clang/AST/Attr.h"
#include "clang/AST/RecordLayout.h"
#include "clang/Basic/Builtins.h"
#include "clang/Basic/CodeGenOptions.h"
#include "clang/Basic/DiagnosticFrontend.h"
#include "clang/CodeGen/CGFunctionInfo.h"
#include "clang/CodeGen/SwiftCallingConv.h"
#include "llvm/ADT/SmallBitVector.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/ADT/StringSwitch.h"
#include "llvm/ADT/Triple.h"
#include "llvm/ADT/Twine.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/IntrinsicsNVPTX.h"
#include "llvm/IR/IntrinsicsS390.h"
#include "llvm/IR/Type.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include <algorithm>

using namespace clang;
using namespace CodeGen;

// Helper for coercing an aggregate argument or return value into an integer
// array of the same size (including padding) and alignment. This alternate
// coercion happens only for the RenderScript ABI and can be removed after
// runtimes that rely on it are no longer supported.
//
// RenderScript assumes that the size of the argument / return value in the IR
// is the same as the size of the corresponding qualified type. This helper
// coerces the aggregate type into an array of the same size (including
// padding). This coercion is used in lieu of expansion of struct members or
// other canonical coercions that return a coerced-type of larger size.
//
// Ty - The argument / return value type
// Context - The associated ASTContext
// LLVMContext - The associated LLVMContext
static ABIArgInfo coerceToIntArray(QualType Ty, ASTContext &Context,
                                   llvm::LLVMContext &LLVMContext) {
  // Alignment and Size are measured in bits.
  const uint64_t Size = Context.getTypeSize(Ty);
  const uint64_t Alignment = Context.getTypeAlign(Ty);
  llvm::Type *IntType = llvm::Type::getIntNTy(LLVMContext, Alignment);
  const uint64_t NumElements = (Size + Alignment - 1) / Alignment;
  return ABIArgInfo::getDirect(llvm::ArrayType::get(IntType, NumElements));
}

static void AssignToArrayRange(CodeGen::CGBuilderTy &Builder,
                               llvm::Value *Array, llvm::Value *Value,
                               unsigned FirstIndex, unsigned LastIndex) {
  // Alternatively, we could emit this as a loop in the source.
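  // For illustration (assumed call, not from this file): FirstIndex=0,
  // LastIndex=3 emits four unrolled stores, Array[0]=Value through
  // Array[3]=Value, one GEP + store pair per cell.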
  for (unsigned I = FirstIndex; I <= LastIndex; ++I) {
    llvm::Value *Cell =
        Builder.CreateConstInBoundsGEP1_32(Builder.getInt8Ty(), Array, I);
    Builder.CreateAlignedStore(Value, Cell, CharUnits::One());
  }
}

static bool isAggregateTypeForABI(QualType T) {
  return !CodeGenFunction::hasScalarEvaluationKind(T) ||
         T->isMemberFunctionPointerType();
}

ABIArgInfo ABIInfo::getNaturalAlignIndirect(QualType Ty, bool ByVal,
                                            bool Realign,
                                            llvm::Type *Padding) const {
  return ABIArgInfo::getIndirect(getContext().getTypeAlignInChars(Ty), ByVal,
                                 Realign, Padding);
}

ABIArgInfo
ABIInfo::getNaturalAlignIndirectInReg(QualType Ty, bool Realign) const {
  return ABIArgInfo::getIndirectInReg(getContext().getTypeAlignInChars(Ty),
                                      /*ByVal*/ false, Realign);
}

Address ABIInfo::EmitMSVAArg(CodeGenFunction &CGF, Address VAListAddr,
                             QualType Ty) const {
  return Address::invalid();
}

static llvm::Type *getVAListElementType(CodeGenFunction &CGF) {
  return CGF.ConvertTypeForMem(
      CGF.getContext().getBuiltinVaListType()->getPointeeType());
}

bool ABIInfo::isPromotableIntegerTypeForABI(QualType Ty) const {
  if (Ty->isPromotableIntegerType())
    return true;

  if (const auto *EIT = Ty->getAs<BitIntType>())
    if (EIT->getNumBits() < getContext().getTypeSize(getContext().IntTy))
      return true;

  return false;
}

ABIInfo::~ABIInfo() {}

/// Does the given lowering require more than the given number of
/// registers when expanded?
///
/// This is intended to be the basis of a reasonable basic implementation
/// of should{Pass,Return}IndirectlyForSwift.
///
/// For most targets, a limit of four total registers is reasonable; this
/// limits the amount of code required in order to move around the value
/// in case it wasn't produced immediately prior to the call by the caller
/// (or wasn't produced in exactly the right registers) or isn't used
/// immediately within the callee. But some targets may need to further
/// limit the register count due to an inability to support that many
/// return registers.
static bool occupiesMoreThan(CodeGenTypes &cgt,
                             ArrayRef<llvm::Type*> scalarTypes,
                             unsigned maxAllRegisters) {
  unsigned intCount = 0, fpCount = 0;
  for (llvm::Type *type : scalarTypes) {
    if (type->isPointerTy()) {
      intCount++;
    } else if (auto intTy = dyn_cast<llvm::IntegerType>(type)) {
      auto ptrWidth = cgt.getTarget().getPointerWidth(0);
      intCount += (intTy->getBitWidth() + ptrWidth - 1) / ptrWidth;
    } else {
      assert(type->isVectorTy() || type->isFloatingPointTy());
      fpCount++;
    }
  }

  return (intCount + fpCount > maxAllRegisters);
}

bool SwiftABIInfo::isLegalVectorTypeForSwift(CharUnits vectorSize,
                                             llvm::Type *eltTy,
                                             unsigned numElts) const {
  // The default implementation of this assumes that the target guarantees
  // 128-bit SIMD support but nothing more.
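  // Under that assumption, e.g., a 16-byte <4 x float> is legal here, while
  // an 8-byte <2 x float> or a 32-byte <8 x float> is not.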
  return (vectorSize.getQuantity() > 8 && vectorSize.getQuantity() <= 16);
}

static CGCXXABI::RecordArgABI getRecordArgABI(const RecordType *RT,
                                              CGCXXABI &CXXABI) {
  const CXXRecordDecl *RD = dyn_cast<CXXRecordDecl>(RT->getDecl());
  if (!RD) {
    if (!RT->getDecl()->canPassInRegisters())
      return CGCXXABI::RAA_Indirect;
    return CGCXXABI::RAA_Default;
  }
  return CXXABI.getRecordArgABI(RD);
}

static CGCXXABI::RecordArgABI getRecordArgABI(QualType T, CGCXXABI &CXXABI) {
  const RecordType *RT = T->getAs<RecordType>();
  if (!RT)
    return CGCXXABI::RAA_Default;
  return getRecordArgABI(RT, CXXABI);
}

static bool classifyReturnType(const CGCXXABI &CXXABI, CGFunctionInfo &FI,
                               const ABIInfo &Info) {
  QualType Ty = FI.getReturnType();

  if (const auto *RT = Ty->getAs<RecordType>())
    if (!isa<CXXRecordDecl>(RT->getDecl()) &&
        !RT->getDecl()->canPassInRegisters()) {
      FI.getReturnInfo() = Info.getNaturalAlignIndirect(Ty);
      return true;
    }

  return CXXABI.classifyReturnType(FI);
}

/// Pass transparent unions as if they were the type of the first element. Sema
/// should ensure that all elements of the union have the same "machine type".
static QualType useFirstFieldIfTransparentUnion(QualType Ty) {
  if (const RecordType *UT = Ty->getAsUnionType()) {
    const RecordDecl *UD = UT->getDecl();
    if (UD->hasAttr<TransparentUnionAttr>()) {
      assert(!UD->field_empty() && "sema created an empty transparent union");
      return UD->field_begin()->getType();
    }
  }
  return Ty;
}

CGCXXABI &ABIInfo::getCXXABI() const {
  return CGT.getCXXABI();
}

ASTContext &ABIInfo::getContext() const {
  return CGT.getContext();
}

llvm::LLVMContext &ABIInfo::getVMContext() const {
  return CGT.getLLVMContext();
}

const llvm::DataLayout &ABIInfo::getDataLayout() const {
  return CGT.getDataLayout();
}

const TargetInfo &ABIInfo::getTarget() const {
  return CGT.getTarget();
}

const CodeGenOptions &ABIInfo::getCodeGenOpts() const {
  return CGT.getCodeGenOpts();
}

bool ABIInfo::isAndroid() const { return getTarget().getTriple().isAndroid(); }

bool ABIInfo::isHomogeneousAggregateBaseType(QualType Ty) const {
  return false;
}

bool ABIInfo::isHomogeneousAggregateSmallEnough(const Type *Base,
                                                uint64_t Members) const {
  return false;
}

bool ABIInfo::isZeroLengthBitfieldPermittedInHomogeneousAggregate() const {
  // For compatibility with GCC, ignore empty bitfields in C++ mode.
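  // Illustrative consequence: given
  //   struct S { double a; int : 0; double b; };
  // S can still be treated as a homogeneous aggregate of doubles in C++,
  // whereas in C the zero-length bitfield disqualifies it.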
  return getContext().getLangOpts().CPlusPlus;
}

LLVM_DUMP_METHOD void ABIArgInfo::dump() const {
  raw_ostream &OS = llvm::errs();
  OS << "(ABIArgInfo Kind=";
  switch (TheKind) {
  case Direct:
    OS << "Direct Type=";
    if (llvm::Type *Ty = getCoerceToType())
      Ty->print(OS);
    else
      OS << "null";
    break;
  case Extend:
    OS << "Extend";
    break;
  case Ignore:
    OS << "Ignore";
    break;
  case InAlloca:
    OS << "InAlloca Offset=" << getInAllocaFieldIndex();
    break;
  case Indirect:
    OS << "Indirect Align=" << getIndirectAlign().getQuantity()
       << " ByVal=" << getIndirectByVal()
       << " Realign=" << getIndirectRealign();
    break;
  case IndirectAliased:
    OS << "Indirect Align=" << getIndirectAlign().getQuantity()
       << " AddrSpace=" << getIndirectAddrSpace()
       << " Realign=" << getIndirectRealign();
    break;
  case Expand:
    OS << "Expand";
    break;
  case CoerceAndExpand:
    OS << "CoerceAndExpand Type=";
    getCoerceAndExpandType()->print(OS);
    break;
  }
  OS << ")\n";
}

// Dynamically round a pointer up to a multiple of the given alignment.
static llvm::Value *emitRoundPointerUpToAlignment(CodeGenFunction &CGF,
                                                  llvm::Value *Ptr,
                                                  CharUnits Align) {
  llvm::Value *PtrAsInt = Ptr;
  // OverflowArgArea = (OverflowArgArea + Align - 1) & -Align;
  PtrAsInt = CGF.Builder.CreatePtrToInt(PtrAsInt, CGF.IntPtrTy);
  PtrAsInt = CGF.Builder.CreateAdd(
      PtrAsInt, llvm::ConstantInt::get(CGF.IntPtrTy, Align.getQuantity() - 1));
  PtrAsInt = CGF.Builder.CreateAnd(
      PtrAsInt, llvm::ConstantInt::get(CGF.IntPtrTy, -Align.getQuantity()));
  PtrAsInt = CGF.Builder.CreateIntToPtr(PtrAsInt,
                                        Ptr->getType(),
                                        Ptr->getName() + ".aligned");
  return PtrAsInt;
}

/// Emit va_arg for a platform using the common void* representation,
/// where arguments are simply emitted in an array of slots on the stack.
///
/// This version implements the core direct-value passing rules.
///
/// \param SlotSize - The size and alignment of a stack slot.
///   Each argument will be allocated to a multiple of this number of
///   slots, and all the slots will be aligned to this value.
/// \param AllowHigherAlign - The slot alignment is not a cap;
///   an argument type with an alignment greater than the slot size
///   will be emitted on a higher-alignment address, potentially
///   leaving one or more empty slots behind as padding. If this
///   is false, the returned address might be less-aligned than
///   DirectAlign.
static Address emitVoidPtrDirectVAArg(CodeGenFunction &CGF,
                                      Address VAListAddr,
                                      llvm::Type *DirectTy,
                                      CharUnits DirectSize,
                                      CharUnits DirectAlign,
                                      CharUnits SlotSize,
                                      bool AllowHigherAlign) {
  // Cast the element type to i8* if necessary. Some platforms define
  // va_list as a struct containing an i8* instead of just an i8*.
  if (VAListAddr.getElementType() != CGF.Int8PtrTy)
    VAListAddr = CGF.Builder.CreateElementBitCast(VAListAddr, CGF.Int8PtrTy);

  llvm::Value *Ptr = CGF.Builder.CreateLoad(VAListAddr, "argp.cur");

  // If the CC aligns values higher than the slot size, do so if needed.
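  // Worked example of the rounding above (illustrative values): with
  // argp.cur == 0x1004, DirectAlign == 16 and SlotSize == 8, the pointer is
  // rounded up to 0x1010 before the argument is read.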
  Address Addr = Address::invalid();
  if (AllowHigherAlign && DirectAlign > SlotSize) {
    Addr = Address(emitRoundPointerUpToAlignment(CGF, Ptr, DirectAlign),
                   CGF.Int8Ty, DirectAlign);
  } else {
    Addr = Address(Ptr, CGF.Int8Ty, SlotSize);
  }

  // Advance the pointer past the argument, then store that back.
  CharUnits FullDirectSize = DirectSize.alignTo(SlotSize);
  Address NextPtr =
      CGF.Builder.CreateConstInBoundsByteGEP(Addr, FullDirectSize, "argp.next");
  CGF.Builder.CreateStore(NextPtr.getPointer(), VAListAddr);

  // If the argument is smaller than a slot, and this is a big-endian
  // target, the argument will be right-adjusted in its slot.
  if (DirectSize < SlotSize && CGF.CGM.getDataLayout().isBigEndian() &&
      !DirectTy->isStructTy()) {
    Addr = CGF.Builder.CreateConstInBoundsByteGEP(Addr, SlotSize - DirectSize);
  }

  Addr = CGF.Builder.CreateElementBitCast(Addr, DirectTy);
  return Addr;
}

/// Emit va_arg for a platform using the common void* representation,
/// where arguments are simply emitted in an array of slots on the stack.
///
/// \param IsIndirect - Values of this type are passed indirectly.
/// \param ValueInfo - The size and alignment of this type, generally
///   computed with getContext().getTypeInfoInChars(ValueTy).
/// \param SlotSizeAndAlign - The size and alignment of a stack slot.
///   Each argument will be allocated to a multiple of this number of
///   slots, and all the slots will be aligned to this value.
/// \param AllowHigherAlign - The slot alignment is not a cap;
///   an argument type with an alignment greater than the slot size
///   will be emitted on a higher-alignment address, potentially
///   leaving one or more empty slots behind as padding.
static Address emitVoidPtrVAArg(CodeGenFunction &CGF, Address VAListAddr,
                                QualType ValueTy, bool IsIndirect,
                                TypeInfoChars ValueInfo,
                                CharUnits SlotSizeAndAlign,
                                bool AllowHigherAlign) {
  // The size and alignment of the value that was passed directly.
  CharUnits DirectSize, DirectAlign;
  if (IsIndirect) {
    DirectSize = CGF.getPointerSize();
    DirectAlign = CGF.getPointerAlign();
  } else {
    DirectSize = ValueInfo.Width;
    DirectAlign = ValueInfo.Align;
  }

  // Cast the address we've calculated to the right type.
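  // (When IsIndirect is set, the slot holds only a pointer to the value:
  // DirectTy below becomes a pointer type, and the pointer loaded from the
  // slot is dereferenced afterwards. So, e.g., a large struct passed
  // indirectly occupies one pointer-sized slot rather than its full size.)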
  llvm::Type *DirectTy = CGF.ConvertTypeForMem(ValueTy), *ElementTy = DirectTy;
  if (IsIndirect)
    DirectTy = DirectTy->getPointerTo(0);

  Address Addr =
      emitVoidPtrDirectVAArg(CGF, VAListAddr, DirectTy, DirectSize,
                             DirectAlign, SlotSizeAndAlign, AllowHigherAlign);

  if (IsIndirect) {
    Addr = Address(CGF.Builder.CreateLoad(Addr), ElementTy, ValueInfo.Align);
  }

  return Addr;
}

static Address complexTempStructure(CodeGenFunction &CGF, Address VAListAddr,
                                    QualType Ty, CharUnits SlotSize,
                                    CharUnits EltSize, const ComplexType *CTy) {
  Address Addr =
      emitVoidPtrDirectVAArg(CGF, VAListAddr, CGF.Int8Ty, SlotSize * 2,
                             SlotSize, SlotSize, /*AllowHigher*/ true);

  Address RealAddr = Addr;
  Address ImagAddr = RealAddr;
  if (CGF.CGM.getDataLayout().isBigEndian()) {
    RealAddr =
        CGF.Builder.CreateConstInBoundsByteGEP(RealAddr, SlotSize - EltSize);
    ImagAddr = CGF.Builder.CreateConstInBoundsByteGEP(ImagAddr,
                                                      2 * SlotSize - EltSize);
  } else {
    ImagAddr = CGF.Builder.CreateConstInBoundsByteGEP(RealAddr, SlotSize);
  }

  llvm::Type *EltTy = CGF.ConvertTypeForMem(CTy->getElementType());
  RealAddr = CGF.Builder.CreateElementBitCast(RealAddr, EltTy);
  ImagAddr = CGF.Builder.CreateElementBitCast(ImagAddr, EltTy);
  llvm::Value *Real = CGF.Builder.CreateLoad(RealAddr, ".vareal");
  llvm::Value *Imag = CGF.Builder.CreateLoad(ImagAddr, ".vaimag");

  Address Temp = CGF.CreateMemTemp(Ty, "vacplx");
  CGF.EmitStoreOfComplex({Real, Imag}, CGF.MakeAddrLValue(Temp, Ty),
                         /*init*/ true);
  return Temp;
}

static Address emitMergePHI(CodeGenFunction &CGF,
                            Address Addr1, llvm::BasicBlock *Block1,
                            Address Addr2, llvm::BasicBlock *Block2,
                            const llvm::Twine &Name = "") {
  assert(Addr1.getType() == Addr2.getType());
  llvm::PHINode *PHI = CGF.Builder.CreatePHI(Addr1.getType(), 2, Name);
  PHI->addIncoming(Addr1.getPointer(), Block1);
  PHI->addIncoming(Addr2.getPointer(), Block2);
  CharUnits Align = std::min(Addr1.getAlignment(), Addr2.getAlignment());
  return Address(PHI, Addr1.getElementType(), Align);
}

TargetCodeGenInfo::TargetCodeGenInfo(std::unique_ptr<ABIInfo> Info)
    : Info(std::move(Info)) {}

TargetCodeGenInfo::~TargetCodeGenInfo() = default;

// If someone can figure out a general rule for this, that would be great.
// It's probably just doomed to be platform-dependent, though.
unsigned TargetCodeGenInfo::getSizeOfUnwindException() const {
  // Verified for:
  //   x86-64 FreeBSD, Linux, Darwin
  //   x86-32 FreeBSD, Linux, Darwin
  //   PowerPC Linux, Darwin
  //   ARM Darwin (*not* EABI)
  //   AArch64 Linux
  return 32;
}

bool TargetCodeGenInfo::isNoProtoCallVariadic(
    const CallArgList &args, const FunctionNoProtoType *fnType) const {
  // The following conventions are known to require this to be false:
  //   x86_stdcall
  //   MIPS
  // For everything else, we just prefer false unless we opt out.
  return false;
}

void
TargetCodeGenInfo::getDependentLibraryOption(llvm::StringRef Lib,
                                             llvm::SmallString<24> &Opt) const {
  // This assumes the user is passing a library name like "rt" instead of a
  // filename like "librt.a/so", and that they don't care whether it's static
  // or dynamic.
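  // e.g. Lib == "rt" becomes the linker option "-lrt".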
  Opt = "-l";
  Opt += Lib;
}

unsigned TargetCodeGenInfo::getOpenCLKernelCallingConv() const {
  // OpenCL kernels are called via an explicit runtime API with arguments
  // set with clSetKernelArg(), not as normal sub-functions.
  // Return SPIR_KERNEL by default as the kernel calling convention to
  // ensure the fingerprint is fixed in such a way that each OpenCL argument
  // gets one matching argument in the produced kernel function argument
  // list to enable feasible implementation of clSetKernelArg() with
  // aggregates etc. In case we would use the default C calling conv here,
  // clSetKernelArg() might break depending on the target-specific
  // conventions; different targets might split structs passed as values
  // to multiple function arguments etc.
  return llvm::CallingConv::SPIR_KERNEL;
}

llvm::Constant *
TargetCodeGenInfo::getNullPointer(const CodeGen::CodeGenModule &CGM,
                                  llvm::PointerType *T, QualType QT) const {
  return llvm::ConstantPointerNull::get(T);
}

LangAS TargetCodeGenInfo::getGlobalVarAddressSpace(CodeGenModule &CGM,
                                                   const VarDecl *D) const {
  assert(!CGM.getLangOpts().OpenCL &&
         !(CGM.getLangOpts().CUDA && CGM.getLangOpts().CUDAIsDevice) &&
         "Address space agnostic languages only");
  return D ? D->getType().getAddressSpace() : LangAS::Default;
}

llvm::Value *TargetCodeGenInfo::performAddrSpaceCast(
    CodeGen::CodeGenFunction &CGF, llvm::Value *Src, LangAS SrcAddr,
    LangAS DestAddr, llvm::Type *DestTy, bool isNonNull) const {
  // Since target may map different address spaces in AST to the same address
  // space, an address space conversion may end up as a bitcast.
  if (auto *C = dyn_cast<llvm::Constant>(Src))
    return performAddrSpaceCast(CGF.CGM, C, SrcAddr, DestAddr, DestTy);
  // Try to preserve the source's name to make IR more readable.
  return CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
      Src, DestTy, Src->hasName() ? Src->getName() + ".ascast" : "");
}

llvm::Constant *
TargetCodeGenInfo::performAddrSpaceCast(CodeGenModule &CGM, llvm::Constant *Src,
                                        LangAS SrcAddr, LangAS DestAddr,
                                        llvm::Type *DestTy) const {
  // Since target may map different address spaces in AST to the same address
  // space, an address space conversion may end up as a bitcast.
  return llvm::ConstantExpr::getPointerCast(Src, DestTy);
}

llvm::SyncScope::ID
TargetCodeGenInfo::getLLVMSyncScopeID(const LangOptions &LangOpts,
                                      SyncScope Scope,
                                      llvm::AtomicOrdering Ordering,
                                      llvm::LLVMContext &Ctx) const {
  return Ctx.getOrInsertSyncScopeID(""); /* default sync scope */
}

static bool isEmptyRecord(ASTContext &Context, QualType T, bool AllowArrays);

/// isEmptyField - Return true iff the field is "empty", that is, it is
/// an unnamed bit-field or an (array of) empty record(s).
static bool isEmptyField(ASTContext &Context, const FieldDecl *FD,
                         bool AllowArrays) {
  if (FD->isUnnamedBitfield())
    return true;

  QualType FT = FD->getType();

  // Constant arrays of empty records count as empty, strip them off.
  // Constant arrays of zero length always count as empty.
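  // Illustrative cases for the two rules above: given 'struct Empty {};',
  // a field 'Empty e[0];' is empty by the zero-length rule, while
  // 'Empty e[4];' is stripped to 'Empty' and handled by the record check
  // below.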
  bool WasArray = false;
  if (AllowArrays)
    while (const ConstantArrayType *AT = Context.getAsConstantArrayType(FT)) {
      if (AT->getSize() == 0)
        return true;
      FT = AT->getElementType();
      // The [[no_unique_address]] special case below does not apply to
      // arrays of C++ empty records, so we need to remember this fact.
      WasArray = true;
    }

  const RecordType *RT = FT->getAs<RecordType>();
  if (!RT)
    return false;

  // C++ record fields are never empty, at least in the Itanium ABI.
  //
  // FIXME: We should use a predicate for whether this behavior is true in the
  // current ABI.
  //
  // The exception to the above rule are fields marked with the
  // [[no_unique_address]] attribute (since C++20). Those do count as empty
  // according to the Itanium ABI. The exception applies only to records,
  // not arrays of records, so we must also check whether we stripped off an
  // array type above.
  if (isa<CXXRecordDecl>(RT->getDecl()) &&
      (WasArray || !FD->hasAttr<NoUniqueAddressAttr>()))
    return false;

  return isEmptyRecord(Context, FT, AllowArrays);
}

/// isEmptyRecord - Return true iff a structure contains only empty
/// fields. Note that a structure with a flexible array member is not
/// considered empty.
static bool isEmptyRecord(ASTContext &Context, QualType T, bool AllowArrays) {
  const RecordType *RT = T->getAs<RecordType>();
  if (!RT)
    return false;
  const RecordDecl *RD = RT->getDecl();
  if (RD->hasFlexibleArrayMember())
    return false;

  // If this is a C++ record, check the bases first.
  if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD))
    for (const auto &I : CXXRD->bases())
      if (!isEmptyRecord(Context, I.getType(), true))
        return false;

  for (const auto *I : RD->fields())
    if (!isEmptyField(Context, I, AllowArrays))
      return false;
  return true;
}

/// isSingleElementStruct - Determine if a structure is a "single
/// element struct", i.e. it has exactly one non-empty field or
/// exactly one field which is itself a single element
/// struct. Structures with flexible array members are never
/// considered single element structs.
///
/// \return The field declaration for the single non-empty field, if
/// it exists.
static const Type *isSingleElementStruct(QualType T, ASTContext &Context) {
  const RecordType *RT = T->getAs<RecordType>();
  if (!RT)
    return nullptr;

  const RecordDecl *RD = RT->getDecl();
  if (RD->hasFlexibleArrayMember())
    return nullptr;

  const Type *Found = nullptr;

  // If this is a C++ record, check the bases first.
  if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD)) {
    for (const auto &I : CXXRD->bases()) {
      // Ignore empty records.
      if (isEmptyRecord(Context, I.getType(), true))
        continue;

      // If we already found an element then this isn't a single-element
      // struct.
      if (Found)
        return nullptr;

      // If this is non-empty and not a single element struct, the composite
      // cannot be a single element struct.
      Found = isSingleElementStruct(I.getType(), Context);
      if (!Found)
        return nullptr;
    }
  }

  // Check for single element.
  for (const auto *FD : RD->fields()) {
    QualType FT = FD->getType();

    // Ignore empty fields.
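    // e.g. for 'struct S { int : 0; float f; };' the unnamed bit-field is
    // skipped here and 'float' becomes the single element (assuming the
    // sizes match in the final check below).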
    if (isEmptyField(Context, FD, true))
      continue;

    // If we already found an element then this isn't a single-element
    // struct.
    if (Found)
      return nullptr;

    // Treat single element arrays as the element.
    while (const ConstantArrayType *AT = Context.getAsConstantArrayType(FT)) {
      if (AT->getSize().getZExtValue() != 1)
        break;
      FT = AT->getElementType();
    }

    if (!isAggregateTypeForABI(FT)) {
      Found = FT.getTypePtr();
    } else {
      Found = isSingleElementStruct(FT, Context);
      if (!Found)
        return nullptr;
    }
  }

  // We don't consider a struct a single-element struct if it has
  // padding beyond the element type.
  if (Found && Context.getTypeSize(Found) != Context.getTypeSize(T))
    return nullptr;

  return Found;
}

namespace {
Address EmitVAArgInstr(CodeGenFunction &CGF, Address VAListAddr, QualType Ty,
                       const ABIArgInfo &AI) {
  // This default implementation defers to the llvm backend's va_arg
  // instruction. It can handle only passing arguments directly
  // (typically only handled in the backend for primitive types), or
  // aggregates passed indirectly by pointer (NOTE: if the "byval"
  // flag has ABI impact in the callee, this implementation cannot
  // work.)

  // Only a few cases are covered here at the moment -- those needed
  // by the default abi.
  llvm::Value *Val;

  if (AI.isIndirect()) {
    assert(!AI.getPaddingType() &&
           "Unexpected PaddingType seen in arginfo in generic VAArg emitter!");
    assert(
        !AI.getIndirectRealign() &&
        "Unexpected IndirectRealign seen in arginfo in generic VAArg emitter!");

    auto TyInfo = CGF.getContext().getTypeInfoInChars(Ty);
    CharUnits TyAlignForABI = TyInfo.Align;

    llvm::Type *ElementTy = CGF.ConvertTypeForMem(Ty);
    llvm::Type *BaseTy = llvm::PointerType::getUnqual(ElementTy);
    llvm::Value *Addr =
        CGF.Builder.CreateVAArg(VAListAddr.getPointer(), BaseTy);
    return Address(Addr, ElementTy, TyAlignForABI);
  } else {
    assert((AI.isDirect() || AI.isExtend()) &&
           "Unexpected ArgInfo Kind in generic VAArg emitter!");

    assert(!AI.getInReg() &&
           "Unexpected InReg seen in arginfo in generic VAArg emitter!");
    assert(!AI.getPaddingType() &&
           "Unexpected PaddingType seen in arginfo in generic VAArg emitter!");
    assert(!AI.getDirectOffset() &&
           "Unexpected DirectOffset seen in arginfo in generic VAArg emitter!");
    assert(!AI.getCoerceToType() &&
           "Unexpected CoerceToType seen in arginfo in generic VAArg emitter!");

    Address Temp = CGF.CreateMemTemp(Ty, "varet");
    Val = CGF.Builder.CreateVAArg(VAListAddr.getPointer(),
                                  CGF.ConvertTypeForMem(Ty));
    CGF.Builder.CreateStore(Val, Temp);
    return Temp;
  }
}

/// DefaultABIInfo - The default implementation for ABI specific
/// details. This implementation provides information which results in
/// self-consistent and sensible LLVM IR generation, but does not
/// conform to any particular ABI.
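///
/// Concretely (see classifyArgumentType/classifyReturnType below): aggregates
/// are passed and returned indirectly at natural alignment, enums are treated
/// as their underlying integer type, over-wide _BitInt values go indirect,
/// and promotable integers are extended; everything else is passed directly.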
class DefaultABIInfo : public ABIInfo {
public:
  DefaultABIInfo(CodeGen::CodeGenTypes &CGT) : ABIInfo(CGT) {}

  ABIArgInfo classifyReturnType(QualType RetTy) const;
  ABIArgInfo classifyArgumentType(QualType RetTy) const;

  void computeInfo(CGFunctionInfo &FI) const override {
    if (!getCXXABI().classifyReturnType(FI))
      FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
    for (auto &I : FI.arguments())
      I.info = classifyArgumentType(I.type);
  }

  Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
                    QualType Ty) const override {
    return EmitVAArgInstr(CGF, VAListAddr, Ty, classifyArgumentType(Ty));
  }
};

class DefaultTargetCodeGenInfo : public TargetCodeGenInfo {
public:
  DefaultTargetCodeGenInfo(CodeGen::CodeGenTypes &CGT)
      : TargetCodeGenInfo(std::make_unique<DefaultABIInfo>(CGT)) {}
};

ABIArgInfo DefaultABIInfo::classifyArgumentType(QualType Ty) const {
  Ty = useFirstFieldIfTransparentUnion(Ty);

  if (isAggregateTypeForABI(Ty)) {
    // Records with non-trivial destructors/copy-constructors should not be
    // passed by value.
    if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, getCXXABI()))
      return getNaturalAlignIndirect(Ty, RAA == CGCXXABI::RAA_DirectInMemory);

    return getNaturalAlignIndirect(Ty);
  }

  // Treat an enum type as its underlying type.
  if (const EnumType *EnumTy = Ty->getAs<EnumType>())
    Ty = EnumTy->getDecl()->getIntegerType();

  ASTContext &Context = getContext();
  if (const auto *EIT = Ty->getAs<BitIntType>())
    if (EIT->getNumBits() >
        Context.getTypeSize(Context.getTargetInfo().hasInt128Type()
                                ? Context.Int128Ty
                                : Context.LongLongTy))
      return getNaturalAlignIndirect(Ty);

  return (isPromotableIntegerTypeForABI(Ty) ? ABIArgInfo::getExtend(Ty)
                                            : ABIArgInfo::getDirect());
}

ABIArgInfo DefaultABIInfo::classifyReturnType(QualType RetTy) const {
  if (RetTy->isVoidType())
    return ABIArgInfo::getIgnore();

  if (isAggregateTypeForABI(RetTy))
    return getNaturalAlignIndirect(RetTy);

  // Treat an enum type as its underlying type.
  if (const EnumType *EnumTy = RetTy->getAs<EnumType>())
    RetTy = EnumTy->getDecl()->getIntegerType();

  if (const auto *EIT = RetTy->getAs<BitIntType>())
    if (EIT->getNumBits() >
        getContext().getTypeSize(getContext().getTargetInfo().hasInt128Type()
                                     ? getContext().Int128Ty
                                     : getContext().LongLongTy))
      return getNaturalAlignIndirect(RetTy);

  return (isPromotableIntegerTypeForABI(RetTy) ? ABIArgInfo::getExtend(RetTy)
                                               : ABIArgInfo::getDirect());
}

//===----------------------------------------------------------------------===//
// WebAssembly ABI Implementation
//
// This is a very simple ABI that relies a lot on DefaultABIInfo.
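//
// Beyond DefaultABIInfo, it ignores empty aggregates, passes single-element
// structs as their element, and (under the experimental multivalue ABI)
// expands or directly returns other aggregates that contain no bitfields.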
//===----------------------------------------------------------------------===//

class WebAssemblyABIInfo final : public SwiftABIInfo {
public:
  enum ABIKind {
    MVP = 0,
    ExperimentalMV = 1,
  };

private:
  DefaultABIInfo defaultInfo;
  ABIKind Kind;

public:
  explicit WebAssemblyABIInfo(CodeGen::CodeGenTypes &CGT, ABIKind Kind)
      : SwiftABIInfo(CGT), defaultInfo(CGT), Kind(Kind) {}

private:
  ABIArgInfo classifyReturnType(QualType RetTy) const;
  ABIArgInfo classifyArgumentType(QualType Ty) const;

  // DefaultABIInfo's classifyReturnType and classifyArgumentType are
  // non-virtual, but computeInfo and EmitVAArg are virtual, so we
  // override them.
  void computeInfo(CGFunctionInfo &FI) const override {
    if (!getCXXABI().classifyReturnType(FI))
      FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
    for (auto &Arg : FI.arguments())
      Arg.info = classifyArgumentType(Arg.type);
  }

  Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
                    QualType Ty) const override;

  bool shouldPassIndirectlyForSwift(ArrayRef<llvm::Type*> scalars,
                                    bool asReturnValue) const override {
    return occupiesMoreThan(CGT, scalars, /*total*/ 4);
  }

  bool isSwiftErrorInRegister() const override {
    return false;
  }
};

class WebAssemblyTargetCodeGenInfo final : public TargetCodeGenInfo {
public:
  explicit WebAssemblyTargetCodeGenInfo(CodeGen::CodeGenTypes &CGT,
                                        WebAssemblyABIInfo::ABIKind K)
      : TargetCodeGenInfo(std::make_unique<WebAssemblyABIInfo>(CGT, K)) {}

  void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
                           CodeGen::CodeGenModule &CGM) const override {
    TargetCodeGenInfo::setTargetAttributes(D, GV, CGM);
    if (const auto *FD = dyn_cast_or_null<FunctionDecl>(D)) {
      if (const auto *Attr = FD->getAttr<WebAssemblyImportModuleAttr>()) {
        llvm::Function *Fn = cast<llvm::Function>(GV);
        llvm::AttrBuilder B(GV->getContext());
        B.addAttribute("wasm-import-module", Attr->getImportModule());
        Fn->addFnAttrs(B);
      }
      if (const auto *Attr = FD->getAttr<WebAssemblyImportNameAttr>()) {
        llvm::Function *Fn = cast<llvm::Function>(GV);
        llvm::AttrBuilder B(GV->getContext());
        B.addAttribute("wasm-import-name", Attr->getImportName());
        Fn->addFnAttrs(B);
      }
      if (const auto *Attr = FD->getAttr<WebAssemblyExportNameAttr>()) {
        llvm::Function *Fn = cast<llvm::Function>(GV);
        llvm::AttrBuilder B(GV->getContext());
        B.addAttribute("wasm-export-name", Attr->getExportName());
        Fn->addFnAttrs(B);
      }
    }

    if (auto *FD = dyn_cast_or_null<FunctionDecl>(D)) {
      llvm::Function *Fn = cast<llvm::Function>(GV);
      if (!FD->doesThisDeclarationHaveABody() && !FD->hasPrototype())
        Fn->addFnAttr("no-prototype");
    }
  }
};

/// Classify argument of given type \p Ty.
ABIArgInfo WebAssemblyABIInfo::classifyArgumentType(QualType Ty) const {
  Ty = useFirstFieldIfTransparentUnion(Ty);

  if (isAggregateTypeForABI(Ty)) {
    // Records with non-trivial destructors/copy-constructors should not be
    // passed by value.
    if (auto RAA = getRecordArgABI(Ty, getCXXABI()))
      return getNaturalAlignIndirect(Ty, RAA == CGCXXABI::RAA_DirectInMemory);
    // Ignore empty structs/unions.
    if (isEmptyRecord(getContext(), Ty, true))
      return ABIArgInfo::getIgnore();
    // Lower single-element structs to just pass a regular value.
    // TODO: We could do reasonable-size multiple-element structs too, using
    // getExpand(), though watch out for things like bitfields.
    if (const Type *SeltTy = isSingleElementStruct(Ty, getContext()))
      return ABIArgInfo::getDirect(CGT.ConvertType(QualType(SeltTy, 0)));
    // For the experimental multivalue ABI, fully expand all other aggregates.
    if (Kind == ABIKind::ExperimentalMV) {
      const RecordType *RT = Ty->getAs<RecordType>();
      assert(RT);
      bool HasBitField = false;
      for (auto *Field : RT->getDecl()->fields()) {
        if (Field->isBitField()) {
          HasBitField = true;
          break;
        }
      }
      if (!HasBitField)
        return ABIArgInfo::getExpand();
    }
  }

  // Otherwise just do the default thing.
  return defaultInfo.classifyArgumentType(Ty);
}

ABIArgInfo WebAssemblyABIInfo::classifyReturnType(QualType RetTy) const {
  if (isAggregateTypeForABI(RetTy)) {
    // Records with non-trivial destructors/copy-constructors should not be
    // returned by value.
    if (!getRecordArgABI(RetTy, getCXXABI())) {
      // Ignore empty structs/unions.
      if (isEmptyRecord(getContext(), RetTy, true))
        return ABIArgInfo::getIgnore();
      // Lower single-element structs to just return a regular value. TODO: We
      // could do reasonable-size multiple-element structs too, using
      // ABIArgInfo::getDirect().
      if (const Type *SeltTy = isSingleElementStruct(RetTy, getContext()))
        return ABIArgInfo::getDirect(CGT.ConvertType(QualType(SeltTy, 0)));
      // For the experimental multivalue ABI, return all other aggregates
      // directly.
      if (Kind == ABIKind::ExperimentalMV)
        return ABIArgInfo::getDirect();
    }
  }

  // Otherwise just do the default thing.
  return defaultInfo.classifyReturnType(RetTy);
}

Address WebAssemblyABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
                                      QualType Ty) const {
  bool IsIndirect = isAggregateTypeForABI(Ty) &&
                    !isEmptyRecord(getContext(), Ty, true) &&
                    !isSingleElementStruct(Ty, getContext());
  return emitVoidPtrVAArg(CGF, VAListAddr, Ty, IsIndirect,
                          getContext().getTypeInfoInChars(Ty),
                          CharUnits::fromQuantity(4),
                          /*AllowHigherAlign=*/true);
}

//===----------------------------------------------------------------------===//
// le32/PNaCl bitcode ABI Implementation
//
// This is a simplified version of the x86_32 ABI. Arguments and return values
// are always passed on the stack.
//===----------------------------------------------------------------------===//

class PNaClABIInfo : public ABIInfo {
public:
  PNaClABIInfo(CodeGen::CodeGenTypes &CGT) : ABIInfo(CGT) {}

  ABIArgInfo classifyReturnType(QualType RetTy) const;
  ABIArgInfo classifyArgumentType(QualType RetTy) const;

  void computeInfo(CGFunctionInfo &FI) const override;
  Address EmitVAArg(CodeGenFunction &CGF,
                    Address VAListAddr, QualType Ty) const override;
};

class PNaClTargetCodeGenInfo : public TargetCodeGenInfo {
public:
  PNaClTargetCodeGenInfo(CodeGen::CodeGenTypes &CGT)
      : TargetCodeGenInfo(std::make_unique<PNaClABIInfo>(CGT)) {}
};

void PNaClABIInfo::computeInfo(CGFunctionInfo &FI) const {
  if (!getCXXABI().classifyReturnType(FI))
    FI.getReturnInfo() = classifyReturnType(FI.getReturnType());

  for (auto &I : FI.arguments())
    I.info = classifyArgumentType(I.type);
}

Address PNaClABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
                                QualType Ty) const {
  // The PNaCL ABI is a bit odd, in that varargs don't use normal
  // function classification. Structs get passed directly for varargs
  // functions, through a rewriting transform in
  // pnacl-llvm/lib/Transforms/NaCl/ExpandVarArgs.cpp, which allows
  // this target to actually support a va_arg instruction with an
  // aggregate type, unlike other targets.
  return EmitVAArgInstr(CGF, VAListAddr, Ty, ABIArgInfo::getDirect());
}

/// Classify argument of given type \p Ty.
ABIArgInfo PNaClABIInfo::classifyArgumentType(QualType Ty) const {
  if (isAggregateTypeForABI(Ty)) {
    if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, getCXXABI()))
      return getNaturalAlignIndirect(Ty, RAA == CGCXXABI::RAA_DirectInMemory);
    return getNaturalAlignIndirect(Ty);
  } else if (const EnumType *EnumTy = Ty->getAs<EnumType>()) {
    // Treat an enum type as its underlying type.
    Ty = EnumTy->getDecl()->getIntegerType();
  } else if (Ty->isFloatingType()) {
    // Floating-point types don't go inreg.
    return ABIArgInfo::getDirect();
  } else if (const auto *EIT = Ty->getAs<BitIntType>()) {
    // Treat bit-precise integers as integers if <= 64, otherwise pass
    // indirectly.
    if (EIT->getNumBits() > 64)
      return getNaturalAlignIndirect(Ty);
    return ABIArgInfo::getDirect();
  }

  return (isPromotableIntegerTypeForABI(Ty) ? ABIArgInfo::getExtend(Ty)
                                            : ABIArgInfo::getDirect());
}

ABIArgInfo PNaClABIInfo::classifyReturnType(QualType RetTy) const {
  if (RetTy->isVoidType())
    return ABIArgInfo::getIgnore();

  // In the PNaCl ABI we always return records/structures on the stack.
  if (isAggregateTypeForABI(RetTy))
    return getNaturalAlignIndirect(RetTy);

  // Treat bit-precise integers as integers if <= 64, otherwise pass
  // indirectly.
  if (const auto *EIT = RetTy->getAs<BitIntType>()) {
    if (EIT->getNumBits() > 64)
      return getNaturalAlignIndirect(RetTy);
    return ABIArgInfo::getDirect();
  }

  // Treat an enum type as its underlying type.
  if (const EnumType *EnumTy = RetTy->getAs<EnumType>())
    RetTy = EnumTy->getDecl()->getIntegerType();

  return (isPromotableIntegerTypeForABI(RetTy) ? ABIArgInfo::getExtend(RetTy)
                                               : ABIArgInfo::getDirect());
}

/// IsX86_MMXType - Return true if this is an MMX type.
bool IsX86_MMXType(llvm::Type *IRType) {
  // Return true if the type is an MMX type <2 x i32>, <4 x i16>, or <8 x i8>.
  return IRType->isVectorTy() && IRType->getPrimitiveSizeInBits() == 64 &&
         cast<llvm::VectorType>(IRType)->getElementType()->isIntegerTy() &&
         IRType->getScalarSizeInBits() != 64;
}

static llvm::Type* X86AdjustInlineAsmType(CodeGen::CodeGenFunction &CGF,
                                          StringRef Constraint,
                                          llvm::Type* Ty) {
  bool IsMMXCons = llvm::StringSwitch<bool>(Constraint)
                       .Cases("y", "&y", "^Ym", true)
                       .Default(false);
  if (IsMMXCons && Ty->isVectorTy()) {
    if (cast<llvm::VectorType>(Ty)->getPrimitiveSizeInBits().getFixedSize() !=
        64) {
      // Invalid MMX constraint
      return nullptr;
    }

    return llvm::Type::getX86_MMXTy(CGF.getLLVMContext());
  }

  // No operation needed
  return Ty;
}

/// Returns true if this type can be passed in SSE registers with the
/// X86_VectorCall calling convention. Shared between x86_32 and x86_64.
static bool isX86VectorTypeForVectorCall(ASTContext &Context, QualType Ty) {
  if (const BuiltinType *BT = Ty->getAs<BuiltinType>()) {
    if (BT->isFloatingPoint() && BT->getKind() != BuiltinType::Half) {
      if (BT->getKind() == BuiltinType::LongDouble) {
        if (&Context.getTargetInfo().getLongDoubleFormat() ==
            &llvm::APFloat::x87DoubleExtended())
          return false;
      }
      return true;
    }
  } else if (const VectorType *VT = Ty->getAs<VectorType>()) {
    // vectorcall can pass XMM, YMM, and ZMM vectors. We don't pass SSE1 MMX
    // registers specially.
    unsigned VecSize = Context.getTypeSize(VT);
    if (VecSize == 128 || VecSize == 256 || VecSize == 512)
      return true;
  }
  return false;
}

/// Returns true if this aggregate is small enough to be passed in SSE
/// registers in the X86_VectorCall calling convention. Shared between x86_32
/// and x86_64.
static bool isX86VectorCallAggregateSmallEnough(uint64_t NumMembers) {
  return NumMembers <= 4;
}

/// Returns a Homogeneous Vector Aggregate ABIArgInfo, used in X86.
static ABIArgInfo getDirectX86Hva(llvm::Type* T = nullptr) {
  auto AI = ABIArgInfo::getDirect(T);
  AI.setInReg(true);
  AI.setCanBeFlattened(false);
  return AI;
}

//===----------------------------------------------------------------------===//
// X86-32 ABI Implementation
//===----------------------------------------------------------------------===//

/// Similar to llvm::CCState, but for Clang.
struct CCState {
  CCState(CGFunctionInfo &FI)
      : IsPreassigned(FI.arg_size()), CC(FI.getCallingConvention()) {}

  llvm::SmallBitVector IsPreassigned;
  unsigned CC = CallingConv::CC_C;
  unsigned FreeRegs = 0;
  unsigned FreeSSERegs = 0;
};

/// X86_32ABIInfo - The X86-32 ABI information.
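///
/// One class covers several 32-bit variants (Darwin, Windows, MCU, Linux);
/// the Is*ABI flags below select among their differences at classification
/// time.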
class X86_32ABIInfo : public SwiftABIInfo {
  enum Class {
    Integer,
    Float
  };

  static const unsigned MinABIStackAlignInBytes = 4;

  bool IsDarwinVectorABI;
  bool IsRetSmallStructInRegABI;
  bool IsWin32StructABI;
  bool IsSoftFloatABI;
  bool IsMCUABI;
  bool IsLinuxABI;
  unsigned DefaultNumRegisterParameters;

  static bool isRegisterSize(unsigned Size) {
    return (Size == 8 || Size == 16 || Size == 32 || Size == 64);
  }

  bool isHomogeneousAggregateBaseType(QualType Ty) const override {
    // FIXME: Assumes vectorcall is in use.
    return isX86VectorTypeForVectorCall(getContext(), Ty);
  }

  bool isHomogeneousAggregateSmallEnough(const Type *Ty,
                                         uint64_t NumMembers) const override {
    // FIXME: Assumes vectorcall is in use.
    return isX86VectorCallAggregateSmallEnough(NumMembers);
  }

  bool shouldReturnTypeInRegister(QualType Ty, ASTContext &Context) const;

  /// getIndirectResult - Given a source type \arg Ty, return a suitable result
  /// such that the argument will be passed in memory.
  ABIArgInfo getIndirectResult(QualType Ty, bool ByVal, CCState &State) const;

  ABIArgInfo getIndirectReturnResult(QualType Ty, CCState &State) const;

  /// Return the alignment to use for the given type on the stack.
  unsigned getTypeStackAlignInBytes(QualType Ty, unsigned Align) const;

  Class classify(QualType Ty) const;
  ABIArgInfo classifyReturnType(QualType RetTy, CCState &State) const;
  ABIArgInfo classifyArgumentType(QualType RetTy, CCState &State) const;

  /// Updates the number of available free registers, returns
  /// true if any registers were allocated.
  bool updateFreeRegs(QualType Ty, CCState &State) const;

  bool shouldAggregateUseDirect(QualType Ty, CCState &State, bool &InReg,
                                bool &NeedsPadding) const;
  bool shouldPrimitiveUseInReg(QualType Ty, CCState &State) const;

  bool canExpandIndirectArgument(QualType Ty) const;

  /// Rewrite the function info so that all memory arguments use
  /// inalloca.
  void rewriteWithInAlloca(CGFunctionInfo &FI) const;

  void addFieldToArgStruct(SmallVector<llvm::Type *, 6> &FrameFields,
                           CharUnits &StackOffset, ABIArgInfo &Info,
                           QualType Type) const;
  void runVectorCallFirstPass(CGFunctionInfo &FI, CCState &State) const;

public:

  void computeInfo(CGFunctionInfo &FI) const override;
  Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
                    QualType Ty) const override;

  X86_32ABIInfo(CodeGen::CodeGenTypes &CGT, bool DarwinVectorABI,
                bool RetSmallStructInRegABI, bool Win32StructABI,
                unsigned NumRegisterParameters, bool SoftFloatABI)
      : SwiftABIInfo(CGT), IsDarwinVectorABI(DarwinVectorABI),
        IsRetSmallStructInRegABI(RetSmallStructInRegABI),
        IsWin32StructABI(Win32StructABI), IsSoftFloatABI(SoftFloatABI),
        IsMCUABI(CGT.getTarget().getTriple().isOSIAMCU()),
        IsLinuxABI(CGT.getTarget().getTriple().isOSLinux() ||
                   CGT.getTarget().getTriple().isOSCygMing()),
        DefaultNumRegisterParameters(NumRegisterParameters) {}

  bool shouldPassIndirectlyForSwift(ArrayRef<llvm::Type*> scalars,
                                    bool asReturnValue) const override {
    // LLVM's x86-32 lowering currently only assigns up to three
    // integer registers and three fp registers.
    // Oddly, it'll use up to four vector registers for vectors, but those
    // can overlap with the scalar registers.
    return occupiesMoreThan(CGT, scalars, /*total*/ 3);
  }

  bool isSwiftErrorInRegister() const override {
    // x86-32 lowering does not support passing swifterror in a register.
    return false;
  }
};

class X86_32TargetCodeGenInfo : public TargetCodeGenInfo {
public:
  X86_32TargetCodeGenInfo(CodeGen::CodeGenTypes &CGT, bool DarwinVectorABI,
                          bool RetSmallStructInRegABI, bool Win32StructABI,
                          unsigned NumRegisterParameters, bool SoftFloatABI)
      : TargetCodeGenInfo(std::make_unique<X86_32ABIInfo>(
            CGT, DarwinVectorABI, RetSmallStructInRegABI, Win32StructABI,
            NumRegisterParameters, SoftFloatABI)) {}

  static bool isStructReturnInRegABI(
      const llvm::Triple &Triple, const CodeGenOptions &Opts);

  void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
                           CodeGen::CodeGenModule &CGM) const override;

  int getDwarfEHStackPointer(CodeGen::CodeGenModule &CGM) const override {
    // Darwin uses different dwarf register numbers for EH.
    if (CGM.getTarget().getTriple().isOSDarwin()) return 5;
    return 4;
  }

  bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
                               llvm::Value *Address) const override;

  llvm::Type* adjustInlineAsmType(CodeGen::CodeGenFunction &CGF,
                                  StringRef Constraint,
                                  llvm::Type* Ty) const override {
    return X86AdjustInlineAsmType(CGF, Constraint, Ty);
  }

  void addReturnRegisterOutputs(CodeGenFunction &CGF, LValue ReturnValue,
                                std::string &Constraints,
                                std::vector<llvm::Type *> &ResultRegTypes,
                                std::vector<llvm::Type *> &ResultTruncRegTypes,
                                std::vector<LValue> &ResultRegDests,
                                std::string &AsmString,
                                unsigned NumOutputs) const override;

  llvm::Constant *
  getUBSanFunctionSignature(CodeGen::CodeGenModule &CGM) const override {
    unsigned Sig = (0xeb << 0) | // jmp rel8
                   (0x06 << 8) | // .+0x08
                   ('v' << 16) |
                   ('2' << 24);
    return llvm::ConstantInt::get(CGM.Int32Ty, Sig);
  }

  StringRef getARCRetainAutoreleasedReturnValueMarker() const override {
    return "movl\t%ebp, %ebp"
           "\t\t// marker for objc_retainAutoreleaseReturnValue";
  }
};

}

/// Rewrite input constraint references after adding some output constraints.
/// In the case where there is one output and one input and we add one output,
/// we need to replace all operand references greater than or equal to 1:
///   mov $0, $1
///   mov eax, $1
/// The result will be:
///   mov $0, $2
///   mov eax, $2
static void rewriteInputConstraintReferences(unsigned FirstIn,
                                             unsigned NumNewOuts,
                                             std::string &AsmString) {
  std::string Buf;
  llvm::raw_string_ostream OS(Buf);
  size_t Pos = 0;
  while (Pos < AsmString.size()) {
    size_t DollarStart = AsmString.find('$', Pos);
    if (DollarStart == std::string::npos)
      DollarStart = AsmString.size();
    size_t DollarEnd = AsmString.find_first_not_of('$', DollarStart);
    if (DollarEnd == std::string::npos)
      DollarEnd = AsmString.size();
    OS << StringRef(&AsmString[Pos], DollarEnd - Pos);
    Pos = DollarEnd;
    size_t NumDollars = DollarEnd - DollarStart;
    if (NumDollars % 2 != 0 && Pos < AsmString.size()) {
      // We have an operand reference.
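      // An odd dollar count means the last '$' is unescaped: e.g. "$1" is a
      // reference to operand 1, while "$$1" is an escaped literal '$'
      // followed by '1' and has already been copied through unchanged above.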
      size_t DigitStart = Pos;
      if (AsmString[DigitStart] == '{') {
        OS << '{';
        ++DigitStart;
      }
      size_t DigitEnd = AsmString.find_first_not_of("0123456789", DigitStart);
      if (DigitEnd == std::string::npos)
        DigitEnd = AsmString.size();
      StringRef OperandStr(&AsmString[DigitStart], DigitEnd - DigitStart);
      unsigned OperandIndex;
      if (!OperandStr.getAsInteger(10, OperandIndex)) {
        if (OperandIndex >= FirstIn)
          OperandIndex += NumNewOuts;
        OS << OperandIndex;
      } else {
        OS << OperandStr;
      }
      Pos = DigitEnd;
    }
  }
  AsmString = std::move(OS.str());
}

/// Add output constraints for EAX:EDX because they are return registers.
void X86_32TargetCodeGenInfo::addReturnRegisterOutputs(
    CodeGenFunction &CGF, LValue ReturnSlot, std::string &Constraints,
    std::vector<llvm::Type *> &ResultRegTypes,
    std::vector<llvm::Type *> &ResultTruncRegTypes,
    std::vector<LValue> &ResultRegDests, std::string &AsmString,
    unsigned NumOutputs) const {
  uint64_t RetWidth = CGF.getContext().getTypeSize(ReturnSlot.getType());

  // Use the EAX constraint if the width is 32 or smaller and EAX:EDX if it is
  // larger.
  if (!Constraints.empty())
    Constraints += ',';
  if (RetWidth <= 32) {
    Constraints += "={eax}";
    ResultRegTypes.push_back(CGF.Int32Ty);
  } else {
    // Use the 'A' constraint for EAX:EDX.
    Constraints += "=A";
    ResultRegTypes.push_back(CGF.Int64Ty);
  }

  // Truncate EAX or EAX:EDX to an integer of the appropriate size.
  llvm::Type *CoerceTy = llvm::IntegerType::get(CGF.getLLVMContext(), RetWidth);
  ResultTruncRegTypes.push_back(CoerceTy);

  // Coerce the integer by bitcasting the return slot pointer.
  ReturnSlot.setAddress(
      CGF.Builder.CreateElementBitCast(ReturnSlot.getAddress(CGF), CoerceTy));
  ResultRegDests.push_back(ReturnSlot);

  rewriteInputConstraintReferences(NumOutputs, 1, AsmString);
}

/// shouldReturnTypeInRegister - Determine if the given type should be
/// returned in a register (for the Darwin and MCU ABI).
bool X86_32ABIInfo::shouldReturnTypeInRegister(QualType Ty,
                                               ASTContext &Context) const {
  uint64_t Size = Context.getTypeSize(Ty);

  // For i386, the type must be register sized.
  // For the MCU ABI, it only needs to be <= 8 bytes.
  if ((IsMCUABI && Size > 64) || (!IsMCUABI && !isRegisterSize(Size)))
    return false;

  if (Ty->isVectorType()) {
    // 64- and 128-bit vectors inside structures are not returned in
    // registers.
    if (Size == 64 || Size == 128)
      return false;

    return true;
  }

  // If this is a builtin, pointer, enum, complex type, member pointer, or
  // member function pointer it is ok.
  if (Ty->getAs<BuiltinType>() || Ty->hasPointerRepresentation() ||
      Ty->isAnyComplexType() || Ty->isEnumeralType() ||
      Ty->isBlockPointerType() || Ty->isMemberPointerType())
    return true;

  // Arrays are treated like records.
  if (const ConstantArrayType *AT = Context.getAsConstantArrayType(Ty))
    return shouldReturnTypeInRegister(AT->getElementType(), Context);

  // Otherwise, it must be a record type.
  const RecordType *RT = Ty->getAs<RecordType>();
  if (!RT) return false;

  // FIXME: Traverse bases here too.

  // Structure types are passed in register if all fields would be
  // passed in a register.
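  // e.g. a 64-bit 'struct { int x; int y; }' qualifies, since both int
  // fields would individually be returned in a register, whereas
  // 'struct { char c[3]; }' already failed the register-size check above.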
  for (const auto *FD : RT->getDecl()->fields()) {
    // Empty fields are ignored.
    if (isEmptyField(Context, FD, true))
      continue;

    // Check fields recursively.
    if (!shouldReturnTypeInRegister(FD->getType(), Context))
      return false;
  }
  return true;
}

static bool is32Or64BitBasicType(QualType Ty, ASTContext &Context) {
  // Treat complex types as the element type.
  if (const ComplexType *CTy = Ty->getAs<ComplexType>())
    Ty = CTy->getElementType();

  // Check for a type which we know has a simple scalar argument-passing
  // convention without any padding. (We're specifically looking for 32
  // and 64-bit integer and integer-equivalents, float, and double.)
  if (!Ty->getAs<BuiltinType>() && !Ty->hasPointerRepresentation() &&
      !Ty->isEnumeralType() && !Ty->isBlockPointerType())
    return false;

  uint64_t Size = Context.getTypeSize(Ty);
  return Size == 32 || Size == 64;
}

static bool addFieldSizes(ASTContext &Context, const RecordDecl *RD,
                          uint64_t &Size) {
  for (const auto *FD : RD->fields()) {
    // Scalar arguments on the stack get 4 byte alignment on x86. If the
    // argument is smaller than 32-bits, expanding the struct will create
    // alignment padding.
    if (!is32Or64BitBasicType(FD->getType(), Context))
      return false;

    // FIXME: Reject bit-fields wholesale; there are two problems: we don't
    // know how to expand them yet, and the predicate for telling if a
    // bitfield still counts as "basic" is more complicated than what we were
    // doing previously.
    if (FD->isBitField())
      return false;

    Size += Context.getTypeSize(FD->getType());
  }
  return true;
}

static bool addBaseAndFieldSizes(ASTContext &Context, const CXXRecordDecl *RD,
                                 uint64_t &Size) {
  // Don't do this if there are any non-empty bases.
  for (const CXXBaseSpecifier &Base : RD->bases()) {
    if (!addBaseAndFieldSizes(Context, Base.getType()->getAsCXXRecordDecl(),
                              Size))
      return false;
  }
  if (!addFieldSizes(Context, RD, Size))
    return false;
  return true;
}

/// Test whether an argument type which is to be passed indirectly (on the
/// stack) would have the equivalent layout if it was expanded into separate
/// arguments. If so, we prefer to do the latter to avoid inhibiting
/// optimizations.
bool X86_32ABIInfo::canExpandIndirectArgument(QualType Ty) const {
  // We can only expand structure types.
  const RecordType *RT = Ty->getAs<RecordType>();
  if (!RT)
    return false;
  const RecordDecl *RD = RT->getDecl();
  uint64_t Size = 0;
  if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD)) {
    if (!IsWin32StructABI) {
      // On non-Windows, we have to conservatively match our old bitcode
      // prototypes in order to be ABI-compatible at the bitcode level.
      if (!CXXRD->isCLike())
        return false;
    } else {
      // Don't do this for dynamic classes.
      if (CXXRD->isDynamicClass())
        return false;
    }
    if (!addBaseAndFieldSizes(getContext(), CXXRD, Size))
      return false;
  } else {
    if (!addFieldSizes(getContext(), RD, Size))
      return false;
  }

  // We can do this if there was no alignment padding.
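  // e.g. under an ABI where double is 8-byte aligned, 'struct { int a;
  // double b; }' has 4 bytes of interior padding, so 96 != 128 bits and it
  // is not expanded, while 'struct { int a; int b; }' is 64 == 64 and can be.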
  return Size == getContext().getTypeSize(Ty);
}

ABIArgInfo X86_32ABIInfo::getIndirectReturnResult(QualType RetTy,
                                                  CCState &State) const {
  // If the return value is indirect, then the hidden argument is consuming one
  // integer register.
  if (State.FreeRegs) {
    --State.FreeRegs;
    if (!IsMCUABI)
      return getNaturalAlignIndirectInReg(RetTy);
  }
  return getNaturalAlignIndirect(RetTy, /*ByVal=*/false);
}

ABIArgInfo X86_32ABIInfo::classifyReturnType(QualType RetTy,
                                             CCState &State) const {
  if (RetTy->isVoidType())
    return ABIArgInfo::getIgnore();

  const Type *Base = nullptr;
  uint64_t NumElts = 0;
  if ((State.CC == llvm::CallingConv::X86_VectorCall ||
       State.CC == llvm::CallingConv::X86_RegCall) &&
      isHomogeneousAggregate(RetTy, Base, NumElts)) {
    // The LLVM struct type for such an aggregate should lower properly.
    return ABIArgInfo::getDirect();
  }

  if (const VectorType *VT = RetTy->getAs<VectorType>()) {
    // On Darwin, some vectors are returned in registers.
    if (IsDarwinVectorABI) {
      uint64_t Size = getContext().getTypeSize(RetTy);

      // 128-bit vectors are a special case; they are returned in
      // registers and we need to make sure to pick a type the LLVM
      // backend will like.
      if (Size == 128)
        return ABIArgInfo::getDirect(llvm::FixedVectorType::get(
            llvm::Type::getInt64Ty(getVMContext()), 2));

      // Always return in register if it fits in a general purpose
      // register, or if it is 64 bits and has a single element.
      if ((Size == 8 || Size == 16 || Size == 32) ||
          (Size == 64 && VT->getNumElements() == 1))
        return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(),
                                                            Size));

      return getIndirectReturnResult(RetTy, State);
    }

    return ABIArgInfo::getDirect();
  }

  if (isAggregateTypeForABI(RetTy)) {
    if (const RecordType *RT = RetTy->getAs<RecordType>()) {
      // Structures with flexible arrays are always indirect.
      if (RT->getDecl()->hasFlexibleArrayMember())
        return getIndirectReturnResult(RetTy, State);
    }

    // If specified, structs and unions are always indirect.
    if (!IsRetSmallStructInRegABI && !RetTy->isAnyComplexType())
      return getIndirectReturnResult(RetTy, State);

    // Ignore empty structs/unions.
    if (isEmptyRecord(getContext(), RetTy, true))
      return ABIArgInfo::getIgnore();

    // Return complex of _Float16 as <2 x half> so the backend will use xmm0.
    if (const ComplexType *CT = RetTy->getAs<ComplexType>()) {
      QualType ET = getContext().getCanonicalType(CT->getElementType());
      if (ET->isFloat16Type())
        return ABIArgInfo::getDirect(llvm::FixedVectorType::get(
            llvm::Type::getHalfTy(getVMContext()), 2));
    }

    // Small structures which are register sized are generally returned
    // in a register.
    if (shouldReturnTypeInRegister(RetTy, getContext())) {
      uint64_t Size = getContext().getTypeSize(RetTy);

      // As a special-case, if the struct is a "single-element" struct, and
      // the field is of type "float" or "double", return it in a
      // floating-point register. (MSVC does not apply this special case.)
      // We apply a similar transformation for pointer types to improve the
      // quality of the generated IR.
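      // e.g. 'struct { double d; }' comes back as a plain double here rather
      // than as a 64-bit integer, so the backend can return it in a
      // floating-point register.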
1590 if (const Type *SeltTy = isSingleElementStruct(RetTy, getContext())) 1591 if ((!IsWin32StructABI && SeltTy->isRealFloatingType()) 1592 || SeltTy->hasPointerRepresentation()) 1593 return ABIArgInfo::getDirect(CGT.ConvertType(QualType(SeltTy, 0))); 1594 1595 // FIXME: We should be able to narrow this integer in cases with dead 1596 // padding. 1597 return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(),Size)); 1598 } 1599 1600 return getIndirectReturnResult(RetTy, State); 1601 } 1602 1603 // Treat an enum type as its underlying type. 1604 if (const EnumType *EnumTy = RetTy->getAs<EnumType>()) 1605 RetTy = EnumTy->getDecl()->getIntegerType(); 1606 1607 if (const auto *EIT = RetTy->getAs<BitIntType>()) 1608 if (EIT->getNumBits() > 64) 1609 return getIndirectReturnResult(RetTy, State); 1610 1611 return (isPromotableIntegerTypeForABI(RetTy) ? ABIArgInfo::getExtend(RetTy) 1612 : ABIArgInfo::getDirect()); 1613 } 1614 1615 static bool isSIMDVectorType(ASTContext &Context, QualType Ty) { 1616 return Ty->getAs<VectorType>() && Context.getTypeSize(Ty) == 128; 1617 } 1618 1619 static bool isRecordWithSIMDVectorType(ASTContext &Context, QualType Ty) { 1620 const RecordType *RT = Ty->getAs<RecordType>(); 1621 if (!RT) 1622 return false; 1623 const RecordDecl *RD = RT->getDecl(); 1624 1625 // If this is a C++ record, check the bases first. 1626 if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD)) 1627 for (const auto &I : CXXRD->bases()) 1628 if (!isRecordWithSIMDVectorType(Context, I.getType())) 1629 return false; 1630 1631 for (const auto *i : RD->fields()) { 1632 QualType FT = i->getType(); 1633 1634 if (isSIMDVectorType(Context, FT)) 1635 return true; 1636 1637 if (isRecordWithSIMDVectorType(Context, FT)) 1638 return true; 1639 } 1640 1641 return false; 1642 } 1643 1644 unsigned X86_32ABIInfo::getTypeStackAlignInBytes(QualType Ty, 1645 unsigned Align) const { 1646 // Otherwise, if the alignment is less than or equal to the minimum ABI 1647 // alignment, just use the default; the backend will handle this. 1648 if (Align <= MinABIStackAlignInBytes) 1649 return 0; // Use default alignment. 1650 1651 if (IsLinuxABI) { 1652 // Exclude other System V OS (e.g Darwin, PS4 and FreeBSD) since we don't 1653 // want to spend any effort dealing with the ramifications of ABI breaks. 1654 // 1655 // If the vector type is __m128/__m256/__m512, return the default alignment. 1656 if (Ty->isVectorType() && (Align == 16 || Align == 32 || Align == 64)) 1657 return Align; 1658 } 1659 // On non-Darwin, the stack type alignment is always 4. 1660 if (!IsDarwinVectorABI) { 1661 // Set explicit alignment, since we may need to realign the top. 1662 return MinABIStackAlignInBytes; 1663 } 1664 1665 // Otherwise, if the type contains an SSE vector type, the alignment is 16. 1666 if (Align >= 16 && (isSIMDVectorType(getContext(), Ty) || 1667 isRecordWithSIMDVectorType(getContext(), Ty))) 1668 return 16; 1669 1670 return MinABIStackAlignInBytes; 1671 } 1672 1673 ABIArgInfo X86_32ABIInfo::getIndirectResult(QualType Ty, bool ByVal, 1674 CCState &State) const { 1675 if (!ByVal) { 1676 if (State.FreeRegs) { 1677 --State.FreeRegs; // Non-byval indirects just use one pointer. 1678 if (!IsMCUABI) 1679 return getNaturalAlignIndirectInReg(Ty); 1680 } 1681 return getNaturalAlignIndirect(Ty, false); 1682 } 1683 1684 // Compute the byval alignment. 
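  // As a sketch of the cases below: a 4-byte-aligned struct keeps the default
  // 4-byte slot (the StackAlign == 0 path), while a struct containing an SSE
  // vector can get TypeAlign == 16 and StackAlign == 16 with no realignment;
  // realignment only triggers when TypeAlign exceeds the computed StackAlign.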
1685 unsigned TypeAlign = getContext().getTypeAlign(Ty) / 8; 1686 unsigned StackAlign = getTypeStackAlignInBytes(Ty, TypeAlign); 1687 if (StackAlign == 0) 1688 return ABIArgInfo::getIndirect(CharUnits::fromQuantity(4), /*ByVal=*/true); 1689 1690 // If the stack alignment is less than the type alignment, realign the 1691 // argument. 1692 bool Realign = TypeAlign > StackAlign; 1693 return ABIArgInfo::getIndirect(CharUnits::fromQuantity(StackAlign), 1694 /*ByVal=*/true, Realign); 1695 } 1696 1697 X86_32ABIInfo::Class X86_32ABIInfo::classify(QualType Ty) const { 1698 const Type *T = isSingleElementStruct(Ty, getContext()); 1699 if (!T) 1700 T = Ty.getTypePtr(); 1701 1702 if (const BuiltinType *BT = T->getAs<BuiltinType>()) { 1703 BuiltinType::Kind K = BT->getKind(); 1704 if (K == BuiltinType::Float || K == BuiltinType::Double) 1705 return Float; 1706 } 1707 return Integer; 1708 } 1709 1710 bool X86_32ABIInfo::updateFreeRegs(QualType Ty, CCState &State) const { 1711 if (!IsSoftFloatABI) { 1712 Class C = classify(Ty); 1713 if (C == Float) 1714 return false; 1715 } 1716 1717 unsigned Size = getContext().getTypeSize(Ty); 1718 unsigned SizeInRegs = (Size + 31) / 32; 1719 1720 if (SizeInRegs == 0) 1721 return false; 1722 1723 if (!IsMCUABI) { 1724 if (SizeInRegs > State.FreeRegs) { 1725 State.FreeRegs = 0; 1726 return false; 1727 } 1728 } else { 1729 // The MCU psABI allows passing parameters in-reg even if there are 1730 // earlier parameters that are passed on the stack. Also, 1731 // it does not allow passing >8-byte structs in-register, 1732 // even if there are 3 free registers available. 1733 if (SizeInRegs > State.FreeRegs || SizeInRegs > 2) 1734 return false; 1735 } 1736 1737 State.FreeRegs -= SizeInRegs; 1738 return true; 1739 } 1740 1741 bool X86_32ABIInfo::shouldAggregateUseDirect(QualType Ty, CCState &State, 1742 bool &InReg, 1743 bool &NeedsPadding) const { 1744 // On Windows, aggregates other than HFAs are never passed in registers, and 1745 // they do not consume register slots. Homogenous floating-point aggregates 1746 // (HFAs) have already been dealt with at this point. 1747 if (IsWin32StructABI && isAggregateTypeForABI(Ty)) 1748 return false; 1749 1750 NeedsPadding = false; 1751 InReg = !IsMCUABI; 1752 1753 if (!updateFreeRegs(Ty, State)) 1754 return false; 1755 1756 if (IsMCUABI) 1757 return true; 1758 1759 if (State.CC == llvm::CallingConv::X86_FastCall || 1760 State.CC == llvm::CallingConv::X86_VectorCall || 1761 State.CC == llvm::CallingConv::X86_RegCall) { 1762 if (getContext().getTypeSize(Ty) <= 32 && State.FreeRegs) 1763 NeedsPadding = true; 1764 1765 return false; 1766 } 1767 1768 return true; 1769 } 1770 1771 bool X86_32ABIInfo::shouldPrimitiveUseInReg(QualType Ty, CCState &State) const { 1772 if (!updateFreeRegs(Ty, State)) 1773 return false; 1774 1775 if (IsMCUABI) 1776 return false; 1777 1778 if (State.CC == llvm::CallingConv::X86_FastCall || 1779 State.CC == llvm::CallingConv::X86_VectorCall || 1780 State.CC == llvm::CallingConv::X86_RegCall) { 1781 if (getContext().getTypeSize(Ty) > 32) 1782 return false; 1783 1784 return (Ty->isIntegralOrEnumerationType() || Ty->isPointerType() || 1785 Ty->isReferenceType()); 1786 } 1787 1788 return true; 1789 } 1790 1791 void X86_32ABIInfo::runVectorCallFirstPass(CGFunctionInfo &FI, CCState &State) const { 1792 // Vectorcall x86 works subtly different than in x64, so the format is 1793 // a bit different than the x64 version. 
First, all vector types (not HVAs)
1794   // are assigned, with the first 6 ending up in the [XYZ]MM0-5 registers.
1795   // This differs from the x64 implementation, where the first 6 arguments
1796   // by position (regardless of type) get registers.
1797   // In the second pass over the arguments, HVAs are passed in the remaining
1798   // vector registers if possible, or indirectly by address. The address will be
1799   // passed in ECX/EDX if available. Any other arguments are passed according to
1800   // the usual fastcall rules.
1801   MutableArrayRef<CGFunctionInfoArgInfo> Args = FI.arguments();
1802   for (int I = 0, E = Args.size(); I < E; ++I) {
1803     const Type *Base = nullptr;
1804     uint64_t NumElts = 0;
1805     const QualType &Ty = Args[I].type;
1806     if ((Ty->isVectorType() || Ty->isBuiltinType()) &&
1807         isHomogeneousAggregate(Ty, Base, NumElts)) {
1808       if (State.FreeSSERegs >= NumElts) {
1809         State.FreeSSERegs -= NumElts;
1810         Args[I].info = ABIArgInfo::getDirectInReg();
1811         State.IsPreassigned.set(I);
1812       }
1813     }
1814   }
1815 }
1816
1817 ABIArgInfo X86_32ABIInfo::classifyArgumentType(QualType Ty,
1818                                                CCState &State) const {
1819   // FIXME: Set alignment on indirect arguments.
1820   bool IsFastCall = State.CC == llvm::CallingConv::X86_FastCall;
1821   bool IsRegCall = State.CC == llvm::CallingConv::X86_RegCall;
1822   bool IsVectorCall = State.CC == llvm::CallingConv::X86_VectorCall;
1823
1824   Ty = useFirstFieldIfTransparentUnion(Ty);
1825   TypeInfo TI = getContext().getTypeInfo(Ty);
1826
1827   // Check with the C++ ABI first.
1828   const RecordType *RT = Ty->getAs<RecordType>();
1829   if (RT) {
1830     CGCXXABI::RecordArgABI RAA = getRecordArgABI(RT, getCXXABI());
1831     if (RAA == CGCXXABI::RAA_Indirect) {
1832       return getIndirectResult(Ty, false, State);
1833     } else if (RAA == CGCXXABI::RAA_DirectInMemory) {
1834       // The field index doesn't matter, we'll fix it up later.
1835       return ABIArgInfo::getInAlloca(/*FieldIndex=*/0);
1836     }
1837   }
1838
1839   // Regcall uses the concept of a homogeneous vector aggregate, similar
1840   // to other targets.
1841   const Type *Base = nullptr;
1842   uint64_t NumElts = 0;
1843   if ((IsRegCall || IsVectorCall) &&
1844       isHomogeneousAggregate(Ty, Base, NumElts)) {
1845     if (State.FreeSSERegs >= NumElts) {
1846       State.FreeSSERegs -= NumElts;
1847
1848       // Vectorcall passes HVAs directly and does not flatten them, but regcall
1849       // does.
1850       if (IsVectorCall)
1851         return getDirectX86Hva();
1852
1853       if (Ty->isBuiltinType() || Ty->isVectorType())
1854         return ABIArgInfo::getDirect();
1855       return ABIArgInfo::getExpand();
1856     }
1857     return getIndirectResult(Ty, /*ByVal=*/false, State);
1858   }
1859
1860   if (isAggregateTypeForABI(Ty)) {
1861     // Structures with flexible arrays are always indirect.
1862     // FIXME: This should not be byval!
1863     if (RT && RT->getDecl()->hasFlexibleArrayMember())
1864       return getIndirectResult(Ty, true, State);
1865
1866     // Ignore empty structs/unions on non-Windows.
1867     if (!IsWin32StructABI && isEmptyRecord(getContext(), Ty, true))
1868       return ABIArgInfo::getIgnore();
1869
1870     llvm::LLVMContext &LLVMContext = getVMContext();
1871     llvm::IntegerType *Int32 = llvm::Type::getInt32Ty(LLVMContext);
1872     bool NeedsPadding = false;
1873     bool InReg;
1874     if (shouldAggregateUseDirect(Ty, State, InReg, NeedsPadding)) {
1875       unsigned SizeInRegs = (TI.Width + 31) / 32;
1876       SmallVector<llvm::Type*, 3> Elements(SizeInRegs, Int32);
1877       llvm::Type *Result = llvm::StructType::get(LLVMContext, Elements);
1878       if (InReg)
1879         return ABIArgInfo::getDirectInReg(Result);
1880       else
1881         return ABIArgInfo::getDirect(Result);
1882     }
1883     llvm::IntegerType *PaddingType = NeedsPadding ? Int32 : nullptr;
1884
1885     // Pass over-aligned aggregates on Windows indirectly. This behavior was
1886     // added in MSVC 2015.
1887     if (IsWin32StructABI && TI.isAlignRequired() && TI.Align > 32)
1888       return getIndirectResult(Ty, /*ByVal=*/false, State);
1889
1890     // Expand small (<= 128-bit) record types when we know that the stack layout
1891     // of those arguments will match the struct. This is important because the
1892     // LLVM backend isn't smart enough to remove byval, which inhibits many
1893     // optimizations.
1894     // Don't do this for the MCU if there are still free integer registers
1895     // (see X86_64 ABI for full explanation).
1896     if (TI.Width <= 4 * 32 && (!IsMCUABI || State.FreeRegs == 0) &&
1897         canExpandIndirectArgument(Ty))
1898       return ABIArgInfo::getExpandWithPadding(
1899           IsFastCall || IsVectorCall || IsRegCall, PaddingType);
1900
1901     return getIndirectResult(Ty, true, State);
1902   }
1903
1904   if (const VectorType *VT = Ty->getAs<VectorType>()) {
1905     // On Windows, vectors are passed directly if registers are available, or
1906     // indirectly if not. This avoids the need to align argument memory. Pass
1907     // user-defined vector types larger than 512 bits indirectly for simplicity.
1908     if (IsWin32StructABI) {
1909       if (TI.Width <= 512 && State.FreeSSERegs > 0) {
1910         --State.FreeSSERegs;
1911         return ABIArgInfo::getDirectInReg();
1912       }
1913       return getIndirectResult(Ty, /*ByVal=*/false, State);
1914     }
1915
1916     // On Darwin, some vectors are passed in memory; we handle this by passing
1917     // them as an i8/i16/i32/i64.
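    // For example, a <2 x i8> vector (16 bits) is coerced to i16 and a
    // <1 x i64> vector to i64; wider vectors take the paths below.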
1918 if (IsDarwinVectorABI) { 1919 if ((TI.Width == 8 || TI.Width == 16 || TI.Width == 32) || 1920 (TI.Width == 64 && VT->getNumElements() == 1)) 1921 return ABIArgInfo::getDirect( 1922 llvm::IntegerType::get(getVMContext(), TI.Width)); 1923 } 1924 1925 if (IsX86_MMXType(CGT.ConvertType(Ty))) 1926 return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(), 64)); 1927 1928 return ABIArgInfo::getDirect(); 1929 } 1930 1931 1932 if (const EnumType *EnumTy = Ty->getAs<EnumType>()) 1933 Ty = EnumTy->getDecl()->getIntegerType(); 1934 1935 bool InReg = shouldPrimitiveUseInReg(Ty, State); 1936 1937 if (isPromotableIntegerTypeForABI(Ty)) { 1938 if (InReg) 1939 return ABIArgInfo::getExtendInReg(Ty); 1940 return ABIArgInfo::getExtend(Ty); 1941 } 1942 1943 if (const auto *EIT = Ty->getAs<BitIntType>()) { 1944 if (EIT->getNumBits() <= 64) { 1945 if (InReg) 1946 return ABIArgInfo::getDirectInReg(); 1947 return ABIArgInfo::getDirect(); 1948 } 1949 return getIndirectResult(Ty, /*ByVal=*/false, State); 1950 } 1951 1952 if (InReg) 1953 return ABIArgInfo::getDirectInReg(); 1954 return ABIArgInfo::getDirect(); 1955 } 1956 1957 void X86_32ABIInfo::computeInfo(CGFunctionInfo &FI) const { 1958 CCState State(FI); 1959 if (IsMCUABI) 1960 State.FreeRegs = 3; 1961 else if (State.CC == llvm::CallingConv::X86_FastCall) { 1962 State.FreeRegs = 2; 1963 State.FreeSSERegs = 3; 1964 } else if (State.CC == llvm::CallingConv::X86_VectorCall) { 1965 State.FreeRegs = 2; 1966 State.FreeSSERegs = 6; 1967 } else if (FI.getHasRegParm()) 1968 State.FreeRegs = FI.getRegParm(); 1969 else if (State.CC == llvm::CallingConv::X86_RegCall) { 1970 State.FreeRegs = 5; 1971 State.FreeSSERegs = 8; 1972 } else if (IsWin32StructABI) { 1973 // Since MSVC 2015, the first three SSE vectors have been passed in 1974 // registers. The rest are passed indirectly. 1975 State.FreeRegs = DefaultNumRegisterParameters; 1976 State.FreeSSERegs = 3; 1977 } else 1978 State.FreeRegs = DefaultNumRegisterParameters; 1979 1980 if (!::classifyReturnType(getCXXABI(), FI, *this)) { 1981 FI.getReturnInfo() = classifyReturnType(FI.getReturnType(), State); 1982 } else if (FI.getReturnInfo().isIndirect()) { 1983 // The C++ ABI is not aware of register usage, so we have to check if the 1984 // return value was sret and put it in a register ourselves if appropriate. 1985 if (State.FreeRegs) { 1986 --State.FreeRegs; // The sret parameter consumes a register. 1987 if (!IsMCUABI) 1988 FI.getReturnInfo().setInReg(true); 1989 } 1990 } 1991 1992 // The chain argument effectively gives us another free register. 1993 if (FI.isChainCall()) 1994 ++State.FreeRegs; 1995 1996 // For vectorcall, do a first pass over the arguments, assigning FP and vector 1997 // arguments to XMM registers as available. 1998 if (State.CC == llvm::CallingConv::X86_VectorCall) 1999 runVectorCallFirstPass(FI, State); 2000 2001 bool UsedInAlloca = false; 2002 MutableArrayRef<CGFunctionInfoArgInfo> Args = FI.arguments(); 2003 for (int I = 0, E = Args.size(); I < E; ++I) { 2004 // Skip arguments that have already been assigned. 2005 if (State.IsPreassigned.test(I)) 2006 continue; 2007 2008 Args[I].info = classifyArgumentType(Args[I].type, State); 2009 UsedInAlloca |= (Args[I].info.getKind() == ABIArgInfo::InAlloca); 2010 } 2011 2012 // If we needed to use inalloca for any argument, do a second pass and rewrite 2013 // all the memory arguments to use inalloca. 
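  // As an illustrative sketch (hypothetical type): for 'void f(int, NonPOD)'
  // on win32, where NonPOD has a non-trivial copy constructor, both arguments
  // end up as fields of the single packed inalloca frame struct built below,
  // since neither is passed in a register.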
2014 if (UsedInAlloca) 2015 rewriteWithInAlloca(FI); 2016 } 2017 2018 void 2019 X86_32ABIInfo::addFieldToArgStruct(SmallVector<llvm::Type *, 6> &FrameFields, 2020 CharUnits &StackOffset, ABIArgInfo &Info, 2021 QualType Type) const { 2022 // Arguments are always 4-byte-aligned. 2023 CharUnits WordSize = CharUnits::fromQuantity(4); 2024 assert(StackOffset.isMultipleOf(WordSize) && "unaligned inalloca struct"); 2025 2026 // sret pointers and indirect things will require an extra pointer 2027 // indirection, unless they are byval. Most things are byval, and will not 2028 // require this indirection. 2029 bool IsIndirect = false; 2030 if (Info.isIndirect() && !Info.getIndirectByVal()) 2031 IsIndirect = true; 2032 Info = ABIArgInfo::getInAlloca(FrameFields.size(), IsIndirect); 2033 llvm::Type *LLTy = CGT.ConvertTypeForMem(Type); 2034 if (IsIndirect) 2035 LLTy = LLTy->getPointerTo(0); 2036 FrameFields.push_back(LLTy); 2037 StackOffset += IsIndirect ? WordSize : getContext().getTypeSizeInChars(Type); 2038 2039 // Insert padding bytes to respect alignment. 2040 CharUnits FieldEnd = StackOffset; 2041 StackOffset = FieldEnd.alignTo(WordSize); 2042 if (StackOffset != FieldEnd) { 2043 CharUnits NumBytes = StackOffset - FieldEnd; 2044 llvm::Type *Ty = llvm::Type::getInt8Ty(getVMContext()); 2045 Ty = llvm::ArrayType::get(Ty, NumBytes.getQuantity()); 2046 FrameFields.push_back(Ty); 2047 } 2048 } 2049 2050 static bool isArgInAlloca(const ABIArgInfo &Info) { 2051 // Leave ignored and inreg arguments alone. 2052 switch (Info.getKind()) { 2053 case ABIArgInfo::InAlloca: 2054 return true; 2055 case ABIArgInfo::Ignore: 2056 case ABIArgInfo::IndirectAliased: 2057 return false; 2058 case ABIArgInfo::Indirect: 2059 case ABIArgInfo::Direct: 2060 case ABIArgInfo::Extend: 2061 return !Info.getInReg(); 2062 case ABIArgInfo::Expand: 2063 case ABIArgInfo::CoerceAndExpand: 2064 // These are aggregate types which are never passed in registers when 2065 // inalloca is involved. 2066 return true; 2067 } 2068 llvm_unreachable("invalid enum"); 2069 } 2070 2071 void X86_32ABIInfo::rewriteWithInAlloca(CGFunctionInfo &FI) const { 2072 assert(IsWin32StructABI && "inalloca only supported on win32"); 2073 2074 // Build a packed struct type for all of the arguments in memory. 2075 SmallVector<llvm::Type *, 6> FrameFields; 2076 2077 // The stack alignment is always 4. 2078 CharUnits StackAlign = CharUnits::fromQuantity(4); 2079 2080 CharUnits StackOffset; 2081 CGFunctionInfo::arg_iterator I = FI.arg_begin(), E = FI.arg_end(); 2082 2083 // Put 'this' into the struct before 'sret', if necessary. 2084 bool IsThisCall = 2085 FI.getCallingConvention() == llvm::CallingConv::X86_ThisCall; 2086 ABIArgInfo &Ret = FI.getReturnInfo(); 2087 if (Ret.isIndirect() && Ret.isSRetAfterThis() && !IsThisCall && 2088 isArgInAlloca(I->info)) { 2089 addFieldToArgStruct(FrameFields, StackOffset, I->info, I->type); 2090 ++I; 2091 } 2092 2093 // Put the sret parameter into the inalloca struct if it's in memory. 2094 if (Ret.isIndirect() && !Ret.getInReg()) { 2095 addFieldToArgStruct(FrameFields, StackOffset, Ret, FI.getReturnType()); 2096 // On Windows, the hidden sret parameter is always returned in eax. 2097 Ret.setInAllocaSRet(IsWin32StructABI); 2098 } 2099 2100 // Skip the 'this' parameter in ecx. 2101 if (IsThisCall) 2102 ++I; 2103 2104 // Put arguments passed in memory into the struct. 
2105 for (; I != E; ++I) { 2106 if (isArgInAlloca(I->info)) 2107 addFieldToArgStruct(FrameFields, StackOffset, I->info, I->type); 2108 } 2109 2110 FI.setArgStruct(llvm::StructType::get(getVMContext(), FrameFields, 2111 /*isPacked=*/true), 2112 StackAlign); 2113 } 2114 2115 Address X86_32ABIInfo::EmitVAArg(CodeGenFunction &CGF, 2116 Address VAListAddr, QualType Ty) const { 2117 2118 auto TypeInfo = getContext().getTypeInfoInChars(Ty); 2119 2120 // x86-32 changes the alignment of certain arguments on the stack. 2121 // 2122 // Just messing with TypeInfo like this works because we never pass 2123 // anything indirectly. 2124 TypeInfo.Align = CharUnits::fromQuantity( 2125 getTypeStackAlignInBytes(Ty, TypeInfo.Align.getQuantity())); 2126 2127 return emitVoidPtrVAArg(CGF, VAListAddr, Ty, /*Indirect*/ false, 2128 TypeInfo, CharUnits::fromQuantity(4), 2129 /*AllowHigherAlign*/ true); 2130 } 2131 2132 bool X86_32TargetCodeGenInfo::isStructReturnInRegABI( 2133 const llvm::Triple &Triple, const CodeGenOptions &Opts) { 2134 assert(Triple.getArch() == llvm::Triple::x86); 2135 2136 switch (Opts.getStructReturnConvention()) { 2137 case CodeGenOptions::SRCK_Default: 2138 break; 2139 case CodeGenOptions::SRCK_OnStack: // -fpcc-struct-return 2140 return false; 2141 case CodeGenOptions::SRCK_InRegs: // -freg-struct-return 2142 return true; 2143 } 2144 2145 if (Triple.isOSDarwin() || Triple.isOSIAMCU()) 2146 return true; 2147 2148 switch (Triple.getOS()) { 2149 case llvm::Triple::DragonFly: 2150 case llvm::Triple::FreeBSD: 2151 case llvm::Triple::OpenBSD: 2152 case llvm::Triple::Win32: 2153 return true; 2154 default: 2155 return false; 2156 } 2157 } 2158 2159 static void addX86InterruptAttrs(const FunctionDecl *FD, llvm::GlobalValue *GV, 2160 CodeGen::CodeGenModule &CGM) { 2161 if (!FD->hasAttr<AnyX86InterruptAttr>()) 2162 return; 2163 2164 llvm::Function *Fn = cast<llvm::Function>(GV); 2165 Fn->setCallingConv(llvm::CallingConv::X86_INTR); 2166 if (FD->getNumParams() == 0) 2167 return; 2168 2169 auto PtrTy = cast<PointerType>(FD->getParamDecl(0)->getType()); 2170 llvm::Type *ByValTy = CGM.getTypes().ConvertType(PtrTy->getPointeeType()); 2171 llvm::Attribute NewAttr = llvm::Attribute::getWithByValType( 2172 Fn->getContext(), ByValTy); 2173 Fn->addParamAttr(0, NewAttr); 2174 } 2175 2176 void X86_32TargetCodeGenInfo::setTargetAttributes( 2177 const Decl *D, llvm::GlobalValue *GV, CodeGen::CodeGenModule &CGM) const { 2178 if (GV->isDeclaration()) 2179 return; 2180 if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D)) { 2181 if (FD->hasAttr<X86ForceAlignArgPointerAttr>()) { 2182 llvm::Function *Fn = cast<llvm::Function>(GV); 2183 Fn->addFnAttr("stackrealign"); 2184 } 2185 2186 addX86InterruptAttrs(FD, GV, CGM); 2187 } 2188 } 2189 2190 bool X86_32TargetCodeGenInfo::initDwarfEHRegSizeTable( 2191 CodeGen::CodeGenFunction &CGF, 2192 llvm::Value *Address) const { 2193 CodeGen::CGBuilderTy &Builder = CGF.Builder; 2194 2195 llvm::Value *Four8 = llvm::ConstantInt::get(CGF.Int8Ty, 4); 2196 2197 // 0-7 are the eight integer registers; the order is different 2198 // on Darwin (for EH), but the range is the same. 2199 // 8 is %eip. 2200 AssignToArrayRange(Builder, Address, Four8, 0, 8); 2201 2202 if (CGF.CGM.getTarget().getTriple().isOSDarwin()) { 2203 // 12-16 are st(0..4). Not sure why we stop at 4. 2204 // These have size 16, which is sizeof(long double) on 2205 // platforms with 8-byte alignment for that type. 
2206     llvm::Value *Sixteen8 = llvm::ConstantInt::get(CGF.Int8Ty, 16);
2207     AssignToArrayRange(Builder, Address, Sixteen8, 12, 16);
2208
2209   } else {
2210     // 9 is %eflags, which doesn't get a size on Darwin for some
2211     // reason.
2212     Builder.CreateAlignedStore(
2213         Four8, Builder.CreateConstInBoundsGEP1_32(CGF.Int8Ty, Address, 9),
2214         CharUnits::One());
2215
2216     // 11-16 are st(0..5). Not sure why we stop at 5.
2217     // These have size 12, which is sizeof(long double) on
2218     // platforms with 4-byte alignment for that type.
2219     llvm::Value *Twelve8 = llvm::ConstantInt::get(CGF.Int8Ty, 12);
2220     AssignToArrayRange(Builder, Address, Twelve8, 11, 16);
2221   }
2222
2223   return false;
2224 }
2225
2226 //===----------------------------------------------------------------------===//
2227 // X86-64 ABI Implementation
2228 //===----------------------------------------------------------------------===//
2229
2230
2231 namespace {
2232 /// The AVX ABI level for X86 targets.
2233 enum class X86AVXABILevel {
2234   None,
2235   AVX,
2236   AVX512
2237 };
2238
2239 /// Returns the size in bits of the largest (native) vector for \p AVXLevel.
2240 static unsigned getNativeVectorSizeForAVXABI(X86AVXABILevel AVXLevel) {
2241   switch (AVXLevel) {
2242   case X86AVXABILevel::AVX512:
2243     return 512;
2244   case X86AVXABILevel::AVX:
2245     return 256;
2246   case X86AVXABILevel::None:
2247     return 128;
2248   }
2249   llvm_unreachable("Unknown AVXLevel");
2250 }
2251
2252 /// X86_64ABIInfo - The X86_64 ABI information.
2253 class X86_64ABIInfo : public SwiftABIInfo {
2254   enum Class {
2255     Integer = 0,
2256     SSE,
2257     SSEUp,
2258     X87,
2259     X87Up,
2260     ComplexX87,
2261     NoClass,
2262     Memory
2263   };
2264
2265   /// merge - Implement the X86_64 ABI merging algorithm.
2266   ///
2267   /// Merge an accumulating classification \arg Accum with a field
2268   /// classification \arg Field.
2269   ///
2270   /// \param Accum - The accumulating classification. This should
2271   /// always be either NoClass or the result of a previous merge
2272   /// call. In addition, this should never be Memory (the caller
2273   /// should just return Memory for the aggregate).
2274   static Class merge(Class Accum, Class Field);
2275
2276   /// postMerge - Implement the X86_64 ABI post merging algorithm.
2277   ///
2278   /// Post merger cleanup, reduces a malformed Hi and Lo pair to
2279   /// final MEMORY or SSE classes when necessary.
2280   ///
2281   /// \param AggregateSize - The size of the current aggregate in
2282   /// the classification process.
2283   ///
2284   /// \param Lo - The classification for the parts of the type
2285   /// residing in the low word of the containing object.
2286   ///
2287   /// \param Hi - The classification for the parts of the type
2288   /// residing in the higher words of the containing object.
2289   ///
2290   void postMerge(unsigned AggregateSize, Class &Lo, Class &Hi) const;
2291
2292   /// classify - Determine the x86_64 register classes in which the
2293   /// given type T should be passed.
2294   ///
2295   /// \param Lo - The classification for the parts of the type
2296   /// residing in the low word of the containing object.
2297   ///
2298   /// \param Hi - The classification for the parts of the type
2299   /// residing in the high word of the containing object.
2300   ///
2301   /// \param OffsetBase - The bit offset of this type in the
2302   /// containing object. Some parameters are classified differently
2303   /// depending on whether they straddle an eightbyte boundary.
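  /// For example (illustrative), a 32-bit field at bit offset 48 occupies
  /// bits 48-79 and straddles the first eightbyte boundary, so both \arg Lo
  /// and \arg Hi participate in its classification; at offset 0 it would
  /// affect only \arg Lo.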
2304   ///
2305   /// \param isNamedArg - Whether the argument in question is a "named"
2306   /// argument, as used in AMD64-ABI 3.5.7.
2307   ///
2308   /// \param IsRegCall - Whether the calling convention is regcall.
2309   ///
2310   /// If a word is unused its result will be NoClass; if a type should
2311   /// be passed in Memory then at least the classification of \arg Lo
2312   /// will be Memory.
2313   ///
2314   /// The \arg Lo class will be NoClass iff the argument is ignored.
2315   ///
2316   /// If the \arg Lo class is ComplexX87, then the \arg Hi class will
2317   /// also be ComplexX87.
2318   void classify(QualType T, uint64_t OffsetBase, Class &Lo, Class &Hi,
2319                 bool isNamedArg, bool IsRegCall = false) const;
2320
2321   llvm::Type *GetByteVectorType(QualType Ty) const;
2322   llvm::Type *GetSSETypeAtOffset(llvm::Type *IRType,
2323                                  unsigned IROffset, QualType SourceTy,
2324                                  unsigned SourceOffset) const;
2325   llvm::Type *GetINTEGERTypeAtOffset(llvm::Type *IRType,
2326                                      unsigned IROffset, QualType SourceTy,
2327                                      unsigned SourceOffset) const;
2328
2329   /// getIndirectReturnResult - Given a source type \arg Ty, return a suitable
2330   /// result such that the argument will be returned in memory.
2331   ABIArgInfo getIndirectReturnResult(QualType Ty) const;
2332
2333   /// getIndirectResult - Given a source type \arg Ty, return a suitable result
2334   /// such that the argument will be passed in memory.
2335   ///
2336   /// \param freeIntRegs - The number of free integer registers remaining
2337   /// available.
2338   ABIArgInfo getIndirectResult(QualType Ty, unsigned freeIntRegs) const;
2339
2340   ABIArgInfo classifyReturnType(QualType RetTy) const;
2341
2342   ABIArgInfo classifyArgumentType(QualType Ty, unsigned freeIntRegs,
2343                                   unsigned &neededInt, unsigned &neededSSE,
2344                                   bool isNamedArg,
2345                                   bool IsRegCall = false) const;
2346
2347   ABIArgInfo classifyRegCallStructType(QualType Ty, unsigned &NeededInt,
2348                                        unsigned &NeededSSE,
2349                                        unsigned &MaxVectorWidth) const;
2350
2351   ABIArgInfo classifyRegCallStructTypeImpl(QualType Ty, unsigned &NeededInt,
2352                                            unsigned &NeededSSE,
2353                                            unsigned &MaxVectorWidth) const;
2354
2355   bool IsIllegalVectorType(QualType Ty) const;
2356
2357   /// The 0.98 ABI revision clarified a lot of ambiguities,
2358   /// unfortunately in ways that were not always consistent with
2359   /// certain previous compilers. In particular, platforms which
2360   /// required strict binary compatibility with older versions of GCC
2361   /// may need to exempt themselves.
2362   bool honorsRevision0_98() const {
2363     return !getTarget().getTriple().isOSDarwin();
2364   }
2365
2366   /// GCC classifies <1 x long long> as SSE but some platform ABIs choose to
2367   /// classify it as INTEGER (for compatibility with older clang compilers).
2368   bool classifyIntegerMMXAsSSE() const {
2369     // Clang <= 3.8 did not do this.
2370     if (getContext().getLangOpts().getClangABICompat() <=
2371         LangOptions::ClangABI::Ver3_8)
2372       return false;
2373
2374     const llvm::Triple &Triple = getTarget().getTriple();
2375     if (Triple.isOSDarwin() || Triple.isPS())
2376       return false;
2377     if (Triple.isOSFreeBSD() && Triple.getOSMajorVersion() >= 10)
2378       return false;
2379     return true;
2380   }
2381
2382   // GCC classifies vectors of __int128 as memory.
2383   bool passInt128VectorsInMem() const {
2384     // Clang <= 9.0 did not do this.
2385 if (getContext().getLangOpts().getClangABICompat() <= 2386 LangOptions::ClangABI::Ver9) 2387 return false; 2388 2389 const llvm::Triple &T = getTarget().getTriple(); 2390 return T.isOSLinux() || T.isOSNetBSD(); 2391 } 2392 2393 X86AVXABILevel AVXLevel; 2394 // Some ABIs (e.g. X32 ABI and Native Client OS) use 32 bit pointers on 2395 // 64-bit hardware. 2396 bool Has64BitPointers; 2397 2398 public: 2399 X86_64ABIInfo(CodeGen::CodeGenTypes &CGT, X86AVXABILevel AVXLevel) : 2400 SwiftABIInfo(CGT), AVXLevel(AVXLevel), 2401 Has64BitPointers(CGT.getDataLayout().getPointerSize(0) == 8) { 2402 } 2403 2404 bool isPassedUsingAVXType(QualType type) const { 2405 unsigned neededInt, neededSSE; 2406 // The freeIntRegs argument doesn't matter here. 2407 ABIArgInfo info = classifyArgumentType(type, 0, neededInt, neededSSE, 2408 /*isNamedArg*/true); 2409 if (info.isDirect()) { 2410 llvm::Type *ty = info.getCoerceToType(); 2411 if (llvm::VectorType *vectorTy = dyn_cast_or_null<llvm::VectorType>(ty)) 2412 return vectorTy->getPrimitiveSizeInBits().getFixedSize() > 128; 2413 } 2414 return false; 2415 } 2416 2417 void computeInfo(CGFunctionInfo &FI) const override; 2418 2419 Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr, 2420 QualType Ty) const override; 2421 Address EmitMSVAArg(CodeGenFunction &CGF, Address VAListAddr, 2422 QualType Ty) const override; 2423 2424 bool has64BitPointers() const { 2425 return Has64BitPointers; 2426 } 2427 2428 bool shouldPassIndirectlyForSwift(ArrayRef<llvm::Type*> scalars, 2429 bool asReturnValue) const override { 2430 return occupiesMoreThan(CGT, scalars, /*total*/ 4); 2431 } 2432 bool isSwiftErrorInRegister() const override { 2433 return true; 2434 } 2435 }; 2436 2437 /// WinX86_64ABIInfo - The Windows X86_64 ABI information. 2438 class WinX86_64ABIInfo : public SwiftABIInfo { 2439 public: 2440 WinX86_64ABIInfo(CodeGen::CodeGenTypes &CGT, X86AVXABILevel AVXLevel) 2441 : SwiftABIInfo(CGT), AVXLevel(AVXLevel), 2442 IsMingw64(getTarget().getTriple().isWindowsGNUEnvironment()) {} 2443 2444 void computeInfo(CGFunctionInfo &FI) const override; 2445 2446 Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr, 2447 QualType Ty) const override; 2448 2449 bool isHomogeneousAggregateBaseType(QualType Ty) const override { 2450 // FIXME: Assumes vectorcall is in use. 2451 return isX86VectorTypeForVectorCall(getContext(), Ty); 2452 } 2453 2454 bool isHomogeneousAggregateSmallEnough(const Type *Ty, 2455 uint64_t NumMembers) const override { 2456 // FIXME: Assumes vectorcall is in use. 
2457     return isX86VectorCallAggregateSmallEnough(NumMembers);
2458   }
2459
2460   bool shouldPassIndirectlyForSwift(ArrayRef<llvm::Type *> scalars,
2461                                     bool asReturnValue) const override {
2462     return occupiesMoreThan(CGT, scalars, /*total*/ 4);
2463   }
2464
2465   bool isSwiftErrorInRegister() const override {
2466     return true;
2467   }
2468
2469 private:
2470   ABIArgInfo classify(QualType Ty, unsigned &FreeSSERegs, bool IsReturnType,
2471                       bool IsVectorCall, bool IsRegCall) const;
2472   ABIArgInfo reclassifyHvaArgForVectorCall(QualType Ty, unsigned &FreeSSERegs,
2473                                            const ABIArgInfo &current) const;
2474
2475   X86AVXABILevel AVXLevel;
2476
2477   bool IsMingw64;
2478 };
2479
2480 class X86_64TargetCodeGenInfo : public TargetCodeGenInfo {
2481 public:
2482   X86_64TargetCodeGenInfo(CodeGen::CodeGenTypes &CGT, X86AVXABILevel AVXLevel)
2483       : TargetCodeGenInfo(std::make_unique<X86_64ABIInfo>(CGT, AVXLevel)) {}
2484
2485   const X86_64ABIInfo &getABIInfo() const {
2486     return static_cast<const X86_64ABIInfo&>(TargetCodeGenInfo::getABIInfo());
2487   }
2488
2489   /// Disable tail call on x86-64. The epilogue code before the tail jump blocks
2490   /// autoreleaseRV/retainRV and autoreleaseRV/unsafeClaimRV optimizations.
2491   bool markARCOptimizedReturnCallsAsNoTail() const override { return true; }
2492
2493   int getDwarfEHStackPointer(CodeGen::CodeGenModule &CGM) const override {
2494     return 7;
2495   }
2496
2497   bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
2498                                llvm::Value *Address) const override {
2499     llvm::Value *Eight8 = llvm::ConstantInt::get(CGF.Int8Ty, 8);
2500
2501     // 0-15 are the 16 integer registers.
2502     // 16 is %rip.
2503     AssignToArrayRange(CGF.Builder, Address, Eight8, 0, 16);
2504     return false;
2505   }
2506
2507   llvm::Type* adjustInlineAsmType(CodeGen::CodeGenFunction &CGF,
2508                                   StringRef Constraint,
2509                                   llvm::Type* Ty) const override {
2510     return X86AdjustInlineAsmType(CGF, Constraint, Ty);
2511   }
2512
2513   bool isNoProtoCallVariadic(const CallArgList &args,
2514                              const FunctionNoProtoType *fnType) const override {
2515     // The default CC on x86-64 sets %al to the number of SSE
2516     // registers used, and GCC sets this when calling an unprototyped
2517     // function, so we override the default behavior. However, don't do
2518     // that when AVX types are involved: the ABI explicitly states it is
2519     // undefined, and it doesn't work in practice because of how the ABI
2520     // defines varargs anyway.
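    // A sketch of the %al convention this relies on: for a variadic call such
    // as printf("%f", 1.0), the caller sets %al to 1 because one vector
    // register carries an argument; treating an unprototyped call as variadic
    // keeps that handshake intact.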
2521     if (fnType->getCallConv() == CC_C) {
2522       bool HasAVXType = false;
2523       for (CallArgList::const_iterator
2524              it = args.begin(), ie = args.end(); it != ie; ++it) {
2525         if (getABIInfo().isPassedUsingAVXType(it->Ty)) {
2526           HasAVXType = true;
2527           break;
2528         }
2529       }
2530
2531       if (!HasAVXType)
2532         return true;
2533     }
2534
2535     return TargetCodeGenInfo::isNoProtoCallVariadic(args, fnType);
2536   }
2537
2538   llvm::Constant *
2539   getUBSanFunctionSignature(CodeGen::CodeGenModule &CGM) const override {
2540     unsigned Sig = (0xeb << 0) | // jmp rel8
2541                    (0x06 << 8) | // .+0x08
2542                    ('v' << 16) |
2543                    ('2' << 24);
2544     return llvm::ConstantInt::get(CGM.Int32Ty, Sig);
2545   }
2546
2547   void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
2548                            CodeGen::CodeGenModule &CGM) const override {
2549     if (GV->isDeclaration())
2550       return;
2551     if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D)) {
2552       if (FD->hasAttr<X86ForceAlignArgPointerAttr>()) {
2553         llvm::Function *Fn = cast<llvm::Function>(GV);
2554         Fn->addFnAttr("stackrealign");
2555       }
2556
2557       addX86InterruptAttrs(FD, GV, CGM);
2558     }
2559   }
2560
2561   void checkFunctionCallABI(CodeGenModule &CGM, SourceLocation CallLoc,
2562                             const FunctionDecl *Caller,
2563                             const FunctionDecl *Callee,
2564                             const CallArgList &Args) const override;
2565 };
2566
2567 static void initFeatureMaps(const ASTContext &Ctx,
2568                             llvm::StringMap<bool> &CallerMap,
2569                             const FunctionDecl *Caller,
2570                             llvm::StringMap<bool> &CalleeMap,
2571                             const FunctionDecl *Callee) {
2572   if (CalleeMap.empty() && CallerMap.empty()) {
2573     // The caller is potentially nullptr in the case where the call isn't in a
2574     // function. In this case, getFunctionFeatureMap ensures we just get the
2575     // TU-level setting (since it cannot be modified by 'target').
2576     Ctx.getFunctionFeatureMap(CallerMap, Caller);
2577     Ctx.getFunctionFeatureMap(CalleeMap, Callee);
2578   }
2579 }
2580
2581 static bool checkAVXParamFeature(DiagnosticsEngine &Diag,
2582                                  SourceLocation CallLoc,
2583                                  const llvm::StringMap<bool> &CallerMap,
2584                                  const llvm::StringMap<bool> &CalleeMap,
2585                                  QualType Ty, StringRef Feature,
2586                                  bool IsArgument) {
2587   bool CallerHasFeat = CallerMap.lookup(Feature);
2588   bool CalleeHasFeat = CalleeMap.lookup(Feature);
2589   if (!CallerHasFeat && !CalleeHasFeat)
2590     return Diag.Report(CallLoc, diag::warn_avx_calling_convention)
2591            << IsArgument << Ty << Feature;
2592
2593   // Mixing calling conventions here is very clearly an error.
2594   if (!CallerHasFeat || !CalleeHasFeat)
2595     return Diag.Report(CallLoc, diag::err_avx_calling_convention)
2596            << IsArgument << Ty << Feature;
2597
2598   // Else, both caller and callee have the required feature, so there is no need
2599   // to diagnose.
2600   return false;
2601 }
2602
2603 static bool checkAVXParam(DiagnosticsEngine &Diag, ASTContext &Ctx,
2604                           SourceLocation CallLoc,
2605                           const llvm::StringMap<bool> &CallerMap,
2606                           const llvm::StringMap<bool> &CalleeMap, QualType Ty,
2607                           bool IsArgument) {
2608   uint64_t Size = Ctx.getTypeSize(Ty);
2609   if (Size > 256)
2610     return checkAVXParamFeature(Diag, CallLoc, CallerMap, CalleeMap, Ty,
2611                                 "avx512f", IsArgument);
2612
2613   if (Size > 128)
2614     return checkAVXParamFeature(Diag, CallLoc, CallerMap, CalleeMap, Ty, "avx",
2615                                 IsArgument);
2616
2617   return false;
2618 }
2619
2620 void X86_64TargetCodeGenInfo::checkFunctionCallABI(
2621     CodeGenModule &CGM, SourceLocation CallLoc, const FunctionDecl *Caller,
2622     const FunctionDecl *Callee, const CallArgList &Args) const {
2623   llvm::StringMap<bool> CallerMap;
2624   llvm::StringMap<bool> CalleeMap;
2625   unsigned ArgIndex = 0;
2626
2627   // We need to loop through the actual call arguments rather than the
2628   // function's parameters, in case this is variadic.
2629   for (const CallArg &Arg : Args) {
2630     // The "avx" feature changes how vectors >128 in size are passed. "avx512f"
2631     // additionally changes how vectors >256 in size are passed. Like GCC, we
2632     // warn when a function is called with an argument where this will change.
2633     // Unlike GCC, we also error when it is an obvious ABI mismatch, that is,
2634     // the caller and callee features are mismatched.
2635     // Unfortunately, we cannot do this diagnostic in SEMA, since the callee can
2636     // change its ABI with attribute-target after this call.
2637     if (Arg.getType()->isVectorType() &&
2638         CGM.getContext().getTypeSize(Arg.getType()) > 128) {
2639       initFeatureMaps(CGM.getContext(), CallerMap, Caller, CalleeMap, Callee);
2640       QualType Ty = Arg.getType();
2641       // The CallArg seems to have desugared the type already, so for clearer
2642       // diagnostics, replace it with the type in the FunctionDecl if possible.
2643       if (ArgIndex < Callee->getNumParams())
2644         Ty = Callee->getParamDecl(ArgIndex)->getType();
2645
2646       if (checkAVXParam(CGM.getDiags(), CGM.getContext(), CallLoc, CallerMap,
2647                         CalleeMap, Ty, /*IsArgument*/ true))
2648         return;
2649     }
2650     ++ArgIndex;
2651   }
2652
2653   // Check return always, as we don't have a good way of knowing in codegen
2654   // whether this value is used, tail-called, etc.
2655   if (Callee->getReturnType()->isVectorType() &&
2656       CGM.getContext().getTypeSize(Callee->getReturnType()) > 128) {
2657     initFeatureMaps(CGM.getContext(), CallerMap, Caller, CalleeMap, Callee);
2658     checkAVXParam(CGM.getDiags(), CGM.getContext(), CallLoc, CallerMap,
2659                   CalleeMap, Callee->getReturnType(),
2660                   /*IsArgument*/ false);
2661   }
2662 }
2663
2664 static std::string qualifyWindowsLibrary(llvm::StringRef Lib) {
2665   // If the argument does not end in .lib, automatically add the suffix.
2666   // If the argument contains a space, enclose it in quotes.
2667   // This matches the behavior of MSVC.
2668   bool Quote = Lib.contains(' ');
2669   std::string ArgStr = Quote ? "\"" : "";
2670   ArgStr += Lib;
2671   if (!Lib.endswith_insensitive(".lib") && !Lib.endswith_insensitive(".a"))
2672     ArgStr += ".lib";
2673   ArgStr += Quote ?
"\"" : ""; 2674 return ArgStr; 2675 } 2676 2677 class WinX86_32TargetCodeGenInfo : public X86_32TargetCodeGenInfo { 2678 public: 2679 WinX86_32TargetCodeGenInfo(CodeGen::CodeGenTypes &CGT, 2680 bool DarwinVectorABI, bool RetSmallStructInRegABI, bool Win32StructABI, 2681 unsigned NumRegisterParameters) 2682 : X86_32TargetCodeGenInfo(CGT, DarwinVectorABI, RetSmallStructInRegABI, 2683 Win32StructABI, NumRegisterParameters, false) {} 2684 2685 void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV, 2686 CodeGen::CodeGenModule &CGM) const override; 2687 2688 void getDependentLibraryOption(llvm::StringRef Lib, 2689 llvm::SmallString<24> &Opt) const override { 2690 Opt = "/DEFAULTLIB:"; 2691 Opt += qualifyWindowsLibrary(Lib); 2692 } 2693 2694 void getDetectMismatchOption(llvm::StringRef Name, 2695 llvm::StringRef Value, 2696 llvm::SmallString<32> &Opt) const override { 2697 Opt = "/FAILIFMISMATCH:\"" + Name.str() + "=" + Value.str() + "\""; 2698 } 2699 }; 2700 2701 static void addStackProbeTargetAttributes(const Decl *D, llvm::GlobalValue *GV, 2702 CodeGen::CodeGenModule &CGM) { 2703 if (llvm::Function *Fn = dyn_cast_or_null<llvm::Function>(GV)) { 2704 2705 if (CGM.getCodeGenOpts().StackProbeSize != 4096) 2706 Fn->addFnAttr("stack-probe-size", 2707 llvm::utostr(CGM.getCodeGenOpts().StackProbeSize)); 2708 if (CGM.getCodeGenOpts().NoStackArgProbe) 2709 Fn->addFnAttr("no-stack-arg-probe"); 2710 } 2711 } 2712 2713 void WinX86_32TargetCodeGenInfo::setTargetAttributes( 2714 const Decl *D, llvm::GlobalValue *GV, CodeGen::CodeGenModule &CGM) const { 2715 X86_32TargetCodeGenInfo::setTargetAttributes(D, GV, CGM); 2716 if (GV->isDeclaration()) 2717 return; 2718 addStackProbeTargetAttributes(D, GV, CGM); 2719 } 2720 2721 class WinX86_64TargetCodeGenInfo : public TargetCodeGenInfo { 2722 public: 2723 WinX86_64TargetCodeGenInfo(CodeGen::CodeGenTypes &CGT, 2724 X86AVXABILevel AVXLevel) 2725 : TargetCodeGenInfo(std::make_unique<WinX86_64ABIInfo>(CGT, AVXLevel)) {} 2726 2727 void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV, 2728 CodeGen::CodeGenModule &CGM) const override; 2729 2730 int getDwarfEHStackPointer(CodeGen::CodeGenModule &CGM) const override { 2731 return 7; 2732 } 2733 2734 bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF, 2735 llvm::Value *Address) const override { 2736 llvm::Value *Eight8 = llvm::ConstantInt::get(CGF.Int8Ty, 8); 2737 2738 // 0-15 are the 16 integer registers. 2739 // 16 is %rip. 
2740 AssignToArrayRange(CGF.Builder, Address, Eight8, 0, 16); 2741 return false; 2742 } 2743 2744 void getDependentLibraryOption(llvm::StringRef Lib, 2745 llvm::SmallString<24> &Opt) const override { 2746 Opt = "/DEFAULTLIB:"; 2747 Opt += qualifyWindowsLibrary(Lib); 2748 } 2749 2750 void getDetectMismatchOption(llvm::StringRef Name, 2751 llvm::StringRef Value, 2752 llvm::SmallString<32> &Opt) const override { 2753 Opt = "/FAILIFMISMATCH:\"" + Name.str() + "=" + Value.str() + "\""; 2754 } 2755 }; 2756 2757 void WinX86_64TargetCodeGenInfo::setTargetAttributes( 2758 const Decl *D, llvm::GlobalValue *GV, CodeGen::CodeGenModule &CGM) const { 2759 TargetCodeGenInfo::setTargetAttributes(D, GV, CGM); 2760 if (GV->isDeclaration()) 2761 return; 2762 if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D)) { 2763 if (FD->hasAttr<X86ForceAlignArgPointerAttr>()) { 2764 llvm::Function *Fn = cast<llvm::Function>(GV); 2765 Fn->addFnAttr("stackrealign"); 2766 } 2767 2768 addX86InterruptAttrs(FD, GV, CGM); 2769 } 2770 2771 addStackProbeTargetAttributes(D, GV, CGM); 2772 } 2773 } 2774 2775 void X86_64ABIInfo::postMerge(unsigned AggregateSize, Class &Lo, 2776 Class &Hi) const { 2777 // AMD64-ABI 3.2.3p2: Rule 5. Then a post merger cleanup is done: 2778 // 2779 // (a) If one of the classes is Memory, the whole argument is passed in 2780 // memory. 2781 // 2782 // (b) If X87UP is not preceded by X87, the whole argument is passed in 2783 // memory. 2784 // 2785 // (c) If the size of the aggregate exceeds two eightbytes and the first 2786 // eightbyte isn't SSE or any other eightbyte isn't SSEUP, the whole 2787 // argument is passed in memory. NOTE: This is necessary to keep the 2788 // ABI working for processors that don't support the __m256 type. 2789 // 2790 // (d) If SSEUP is not preceded by SSE or SSEUP, it is converted to SSE. 2791 // 2792 // Some of these are enforced by the merging logic. Others can arise 2793 // only with unions; for example: 2794 // union { _Complex double; unsigned; } 2795 // 2796 // Note that clauses (b) and (c) were added in 0.98. 2797 // 2798 if (Hi == Memory) 2799 Lo = Memory; 2800 if (Hi == X87Up && Lo != X87 && honorsRevision0_98()) 2801 Lo = Memory; 2802 if (AggregateSize > 128 && (Lo != SSE || Hi != SSEUp)) 2803 Lo = Memory; 2804 if (Hi == SSEUp && Lo != SSE) 2805 Hi = SSE; 2806 } 2807 2808 X86_64ABIInfo::Class X86_64ABIInfo::merge(Class Accum, Class Field) { 2809 // AMD64-ABI 3.2.3p2: Rule 4. Each field of an object is 2810 // classified recursively so that always two fields are 2811 // considered. The resulting class is calculated according to 2812 // the classes of the fields in the eightbyte: 2813 // 2814 // (a) If both classes are equal, this is the resulting class. 2815 // 2816 // (b) If one of the classes is NO_CLASS, the resulting class is 2817 // the other class. 2818 // 2819 // (c) If one of the classes is MEMORY, the result is the MEMORY 2820 // class. 2821 // 2822 // (d) If one of the classes is INTEGER, the result is the 2823 // INTEGER. 2824 // 2825 // (e) If one of the classes is X87, X87UP, COMPLEX_X87 class, 2826 // MEMORY is used as class. 2827 // 2828 // (f) Otherwise class SSE is used. 2829 2830 // Accum should never be memory (we should have returned) or 2831 // ComplexX87 (because this cannot be passed in a structure). 
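  // Worked example of the rules above (illustrative): for
  // 'struct { int i; float f; }' both fields share the first eightbyte and
  // merge(Integer, SSE) yields INTEGER by rule (d), so the struct travels in
  // a GPR; 'struct { float a; float b; }' merges SSE with SSE (rule (a)) and
  // travels in an XMM register.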
2832 assert((Accum != Memory && Accum != ComplexX87) && 2833 "Invalid accumulated classification during merge."); 2834 if (Accum == Field || Field == NoClass) 2835 return Accum; 2836 if (Field == Memory) 2837 return Memory; 2838 if (Accum == NoClass) 2839 return Field; 2840 if (Accum == Integer || Field == Integer) 2841 return Integer; 2842 if (Field == X87 || Field == X87Up || Field == ComplexX87 || 2843 Accum == X87 || Accum == X87Up) 2844 return Memory; 2845 return SSE; 2846 } 2847 2848 void X86_64ABIInfo::classify(QualType Ty, uint64_t OffsetBase, Class &Lo, 2849 Class &Hi, bool isNamedArg, bool IsRegCall) const { 2850 // FIXME: This code can be simplified by introducing a simple value class for 2851 // Class pairs with appropriate constructor methods for the various 2852 // situations. 2853 2854 // FIXME: Some of the split computations are wrong; unaligned vectors 2855 // shouldn't be passed in registers for example, so there is no chance they 2856 // can straddle an eightbyte. Verify & simplify. 2857 2858 Lo = Hi = NoClass; 2859 2860 Class &Current = OffsetBase < 64 ? Lo : Hi; 2861 Current = Memory; 2862 2863 if (const BuiltinType *BT = Ty->getAs<BuiltinType>()) { 2864 BuiltinType::Kind k = BT->getKind(); 2865 2866 if (k == BuiltinType::Void) { 2867 Current = NoClass; 2868 } else if (k == BuiltinType::Int128 || k == BuiltinType::UInt128) { 2869 Lo = Integer; 2870 Hi = Integer; 2871 } else if (k >= BuiltinType::Bool && k <= BuiltinType::LongLong) { 2872 Current = Integer; 2873 } else if (k == BuiltinType::Float || k == BuiltinType::Double || 2874 k == BuiltinType::Float16) { 2875 Current = SSE; 2876 } else if (k == BuiltinType::LongDouble) { 2877 const llvm::fltSemantics *LDF = &getTarget().getLongDoubleFormat(); 2878 if (LDF == &llvm::APFloat::IEEEquad()) { 2879 Lo = SSE; 2880 Hi = SSEUp; 2881 } else if (LDF == &llvm::APFloat::x87DoubleExtended()) { 2882 Lo = X87; 2883 Hi = X87Up; 2884 } else if (LDF == &llvm::APFloat::IEEEdouble()) { 2885 Current = SSE; 2886 } else 2887 llvm_unreachable("unexpected long double representation!"); 2888 } 2889 // FIXME: _Decimal32 and _Decimal64 are SSE. 2890 // FIXME: _float128 and _Decimal128 are (SSE, SSEUp). 2891 return; 2892 } 2893 2894 if (const EnumType *ET = Ty->getAs<EnumType>()) { 2895 // Classify the underlying integer type. 2896 classify(ET->getDecl()->getIntegerType(), OffsetBase, Lo, Hi, isNamedArg); 2897 return; 2898 } 2899 2900 if (Ty->hasPointerRepresentation()) { 2901 Current = Integer; 2902 return; 2903 } 2904 2905 if (Ty->isMemberPointerType()) { 2906 if (Ty->isMemberFunctionPointerType()) { 2907 if (Has64BitPointers) { 2908 // If Has64BitPointers, this is an {i64, i64}, so classify both 2909 // Lo and Hi now. 2910 Lo = Hi = Integer; 2911 } else { 2912 // Otherwise, with 32-bit pointers, this is an {i32, i32}. If that 2913 // straddles an eightbyte boundary, Hi should be classified as well. 
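        // E.g. (illustrative) at OffsetBase 32 the {i32, i32} pair spans bits
        // 32-95: EB_FuncPtr is 0 but EB_ThisAdj rounds up to 1, so both
        // halves are classified Integer below.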
2914 uint64_t EB_FuncPtr = (OffsetBase) / 64; 2915 uint64_t EB_ThisAdj = (OffsetBase + 64 - 1) / 64; 2916 if (EB_FuncPtr != EB_ThisAdj) { 2917 Lo = Hi = Integer; 2918 } else { 2919 Current = Integer; 2920 } 2921 } 2922 } else { 2923 Current = Integer; 2924 } 2925 return; 2926 } 2927 2928 if (const VectorType *VT = Ty->getAs<VectorType>()) { 2929 uint64_t Size = getContext().getTypeSize(VT); 2930 if (Size == 1 || Size == 8 || Size == 16 || Size == 32) { 2931 // gcc passes the following as integer: 2932 // 4 bytes - <4 x char>, <2 x short>, <1 x int>, <1 x float> 2933 // 2 bytes - <2 x char>, <1 x short> 2934 // 1 byte - <1 x char> 2935 Current = Integer; 2936 2937 // If this type crosses an eightbyte boundary, it should be 2938 // split. 2939 uint64_t EB_Lo = (OffsetBase) / 64; 2940 uint64_t EB_Hi = (OffsetBase + Size - 1) / 64; 2941 if (EB_Lo != EB_Hi) 2942 Hi = Lo; 2943 } else if (Size == 64) { 2944 QualType ElementType = VT->getElementType(); 2945 2946 // gcc passes <1 x double> in memory. :( 2947 if (ElementType->isSpecificBuiltinType(BuiltinType::Double)) 2948 return; 2949 2950 // gcc passes <1 x long long> as SSE but clang used to unconditionally 2951 // pass them as integer. For platforms where clang is the de facto 2952 // platform compiler, we must continue to use integer. 2953 if (!classifyIntegerMMXAsSSE() && 2954 (ElementType->isSpecificBuiltinType(BuiltinType::LongLong) || 2955 ElementType->isSpecificBuiltinType(BuiltinType::ULongLong) || 2956 ElementType->isSpecificBuiltinType(BuiltinType::Long) || 2957 ElementType->isSpecificBuiltinType(BuiltinType::ULong))) 2958 Current = Integer; 2959 else 2960 Current = SSE; 2961 2962 // If this type crosses an eightbyte boundary, it should be 2963 // split. 2964 if (OffsetBase && OffsetBase != 64) 2965 Hi = Lo; 2966 } else if (Size == 128 || 2967 (isNamedArg && Size <= getNativeVectorSizeForAVXABI(AVXLevel))) { 2968 QualType ElementType = VT->getElementType(); 2969 2970 // gcc passes 256 and 512 bit <X x __int128> vectors in memory. :( 2971 if (passInt128VectorsInMem() && Size != 128 && 2972 (ElementType->isSpecificBuiltinType(BuiltinType::Int128) || 2973 ElementType->isSpecificBuiltinType(BuiltinType::UInt128))) 2974 return; 2975 2976 // Arguments of 256-bits are split into four eightbyte chunks. The 2977 // least significant one belongs to class SSE and all the others to class 2978 // SSEUP. The original Lo and Hi design considers that types can't be 2979 // greater than 128-bits, so a 64-bit split in Hi and Lo makes sense. 2980 // This design isn't correct for 256-bits, but since there're no cases 2981 // where the upper parts would need to be inspected, avoid adding 2982 // complexity and just consider Hi to match the 64-256 part. 2983 // 2984 // Note that per 3.5.7 of AMD64-ABI, 256-bit args are only passed in 2985 // registers if they are "named", i.e. not part of the "..." of a 2986 // variadic function. 2987 // 2988 // Similarly, per 3.2.3. of the AVX512 draft, 512-bits ("named") args are 2989 // split into eight eightbyte chunks, one SSE and seven SSEUP. 
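    // E.g. (illustrative) a named __m256 argument is classified (SSE, SSEUp)
    // here and travels in a single YMM register, whereas the same value in
    // the "..." of a variadic call fails the isNamedArg check above and stays
    // classified Memory.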
2990       Lo = SSE;
2991       Hi = SSEUp;
2992     }
2993     return;
2994   }
2995
2996   if (const ComplexType *CT = Ty->getAs<ComplexType>()) {
2997     QualType ET = getContext().getCanonicalType(CT->getElementType());
2998
2999     uint64_t Size = getContext().getTypeSize(Ty);
3000     if (ET->isIntegralOrEnumerationType()) {
3001       if (Size <= 64)
3002         Current = Integer;
3003       else if (Size <= 128)
3004         Lo = Hi = Integer;
3005     } else if (ET->isFloat16Type() || ET == getContext().FloatTy) {
3006       Current = SSE;
3007     } else if (ET == getContext().DoubleTy) {
3008       Lo = Hi = SSE;
3009     } else if (ET == getContext().LongDoubleTy) {
3010       const llvm::fltSemantics *LDF = &getTarget().getLongDoubleFormat();
3011       if (LDF == &llvm::APFloat::IEEEquad())
3012         Current = Memory;
3013       else if (LDF == &llvm::APFloat::x87DoubleExtended())
3014         Current = ComplexX87;
3015       else if (LDF == &llvm::APFloat::IEEEdouble())
3016         Lo = Hi = SSE;
3017       else
3018         llvm_unreachable("unexpected long double representation!");
3019     }
3020
3021     // If this complex type crosses an eightbyte boundary then it
3022     // should be split.
3023     uint64_t EB_Real = (OffsetBase) / 64;
3024     uint64_t EB_Imag = (OffsetBase + getContext().getTypeSize(ET)) / 64;
3025     if (Hi == NoClass && EB_Real != EB_Imag)
3026       Hi = Lo;
3027
3028     return;
3029   }
3030
3031   if (const auto *EITy = Ty->getAs<BitIntType>()) {
3032     if (EITy->getNumBits() <= 64)
3033       Current = Integer;
3034     else if (EITy->getNumBits() <= 128)
3035       Lo = Hi = Integer;
3036     // Larger values need to get passed in memory.
3037     return;
3038   }
3039
3040   if (const ConstantArrayType *AT = getContext().getAsConstantArrayType(Ty)) {
3041     // Arrays are treated like structures.
3042
3043     uint64_t Size = getContext().getTypeSize(Ty);
3044
3045     // AMD64-ABI 3.2.3p2: Rule 1. If the size of an object is larger
3046     // than eight eightbytes, ..., it has class MEMORY.
3047     // The regcall ABI doesn't limit object size; the only limitation is the
3048     // number of free registers, which is checked in computeInfo.
3049     if (!IsRegCall && Size > 512)
3050       return;
3051
3052     // AMD64-ABI 3.2.3p2: Rule 1. If ..., or it contains unaligned
3053     // fields, it has class MEMORY.
3054     //
3055     // Only need to check alignment of array base.
3056     if (OffsetBase % getContext().getTypeAlign(AT->getElementType()))
3057       return;
3058
3059     // Otherwise implement simplified merge. We could be smarter about
3060     // this, but it isn't worth it and would be harder to verify.
3061     Current = NoClass;
3062     uint64_t EltSize = getContext().getTypeSize(AT->getElementType());
3063     uint64_t ArraySize = AT->getSize().getZExtValue();
3064
3065     // The only case in which a 256-bit wide vector could be used is when the
3066     // array contains a single 256-bit element. Since the Lo and Hi logic
3067     // isn't extended to work for sizes wider than 128, check early and fall back to memory.
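    // (Illustrative: '__m256 v[1]' has Size == EltSize == 256 and remains
    // eligible under the AVX ABI, while 'double d[4]' has Size 256 but
    // EltSize 64 and takes the memory path below.)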
3068 // 3069 if (Size > 128 && 3070 (Size != EltSize || Size > getNativeVectorSizeForAVXABI(AVXLevel))) 3071 return; 3072 3073 for (uint64_t i=0, Offset=OffsetBase; i<ArraySize; ++i, Offset += EltSize) { 3074 Class FieldLo, FieldHi; 3075 classify(AT->getElementType(), Offset, FieldLo, FieldHi, isNamedArg); 3076 Lo = merge(Lo, FieldLo); 3077 Hi = merge(Hi, FieldHi); 3078 if (Lo == Memory || Hi == Memory) 3079 break; 3080 } 3081 3082 postMerge(Size, Lo, Hi); 3083 assert((Hi != SSEUp || Lo == SSE) && "Invalid SSEUp array classification."); 3084 return; 3085 } 3086 3087 if (const RecordType *RT = Ty->getAs<RecordType>()) { 3088 uint64_t Size = getContext().getTypeSize(Ty); 3089 3090 // AMD64-ABI 3.2.3p2: Rule 1. If the size of an object is larger 3091 // than eight eightbytes, ..., it has class MEMORY. 3092 if (Size > 512) 3093 return; 3094 3095 // AMD64-ABI 3.2.3p2: Rule 2. If a C++ object has either a non-trivial 3096 // copy constructor or a non-trivial destructor, it is passed by invisible 3097 // reference. 3098 if (getRecordArgABI(RT, getCXXABI())) 3099 return; 3100 3101 const RecordDecl *RD = RT->getDecl(); 3102 3103 // Assume variable sized types are passed in memory. 3104 if (RD->hasFlexibleArrayMember()) 3105 return; 3106 3107 const ASTRecordLayout &Layout = getContext().getASTRecordLayout(RD); 3108 3109 // Reset Lo class, this will be recomputed. 3110 Current = NoClass; 3111 3112 // If this is a C++ record, classify the bases first. 3113 if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD)) { 3114 for (const auto &I : CXXRD->bases()) { 3115 assert(!I.isVirtual() && !I.getType()->isDependentType() && 3116 "Unexpected base class!"); 3117 const auto *Base = 3118 cast<CXXRecordDecl>(I.getType()->castAs<RecordType>()->getDecl()); 3119 3120 // Classify this field. 3121 // 3122 // AMD64-ABI 3.2.3p2: Rule 3. If the size of the aggregate exceeds a 3123 // single eightbyte, each is classified separately. Each eightbyte gets 3124 // initialized to class NO_CLASS. 3125 Class FieldLo, FieldHi; 3126 uint64_t Offset = 3127 OffsetBase + getContext().toBits(Layout.getBaseClassOffset(Base)); 3128 classify(I.getType(), Offset, FieldLo, FieldHi, isNamedArg); 3129 Lo = merge(Lo, FieldLo); 3130 Hi = merge(Hi, FieldHi); 3131 if (Lo == Memory || Hi == Memory) { 3132 postMerge(Size, Lo, Hi); 3133 return; 3134 } 3135 } 3136 } 3137 3138 // Classify the fields one at a time, merging the results. 3139 unsigned idx = 0; 3140 bool UseClang11Compat = getContext().getLangOpts().getClangABICompat() <= 3141 LangOptions::ClangABI::Ver11 || 3142 getContext().getTargetInfo().getTriple().isPS(); 3143 bool IsUnion = RT->isUnionType() && !UseClang11Compat; 3144 3145 for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end(); 3146 i != e; ++i, ++idx) { 3147 uint64_t Offset = OffsetBase + Layout.getFieldOffset(idx); 3148 bool BitField = i->isBitField(); 3149 3150 // Ignore padding bit-fields. 3151 if (BitField && i->isUnnamedBitfield()) 3152 continue; 3153 3154 // AMD64-ABI 3.2.3p2: Rule 1. If the size of an object is larger than 3155 // eight eightbytes, or it contains unaligned fields, it has class MEMORY. 3156 // 3157 // The only case a 256-bit or a 512-bit wide vector could be used is when 3158 // the struct contains a single 256-bit or 512-bit element. Early check 3159 // and fallback to memory. 3160 // 3161 // FIXME: Extended the Lo and Hi logic properly to work for size wider 3162 // than 128. 
      if (Size > 128 &&
          ((!IsUnion && Size != getContext().getTypeSize(i->getType())) ||
           Size > getNativeVectorSizeForAVXABI(AVXLevel))) {
        Lo = Memory;
        postMerge(Size, Lo, Hi);
        return;
      }
      // Note, skip this test for bit-fields, see below.
      if (!BitField && Offset % getContext().getTypeAlign(i->getType())) {
        Lo = Memory;
        postMerge(Size, Lo, Hi);
        return;
      }

      // Classify this field.
      //
      // AMD64-ABI 3.2.3p2: Rule 3. If the size of the aggregate
      // exceeds a single eightbyte, each is classified
      // separately. Each eightbyte gets initialized to class
      // NO_CLASS.
      Class FieldLo, FieldHi;

      // Bit-fields require special handling, they do not force the
      // structure to be passed in memory even if unaligned, and
      // therefore they can straddle an eightbyte.
      if (BitField) {
        assert(!i->isUnnamedBitfield());
        uint64_t Offset = OffsetBase + Layout.getFieldOffset(idx);
        uint64_t Size = i->getBitWidthValue(getContext());

        uint64_t EB_Lo = Offset / 64;
        uint64_t EB_Hi = (Offset + Size - 1) / 64;

        if (EB_Lo) {
          assert(EB_Hi == EB_Lo && "Invalid classification, type > 16 bytes.");
          FieldLo = NoClass;
          FieldHi = Integer;
        } else {
          FieldLo = Integer;
          FieldHi = EB_Hi ? Integer : NoClass;
        }
      } else
        classify(i->getType(), Offset, FieldLo, FieldHi, isNamedArg);
      Lo = merge(Lo, FieldLo);
      Hi = merge(Hi, FieldHi);
      if (Lo == Memory || Hi == Memory)
        break;
    }

    postMerge(Size, Lo, Hi);
  }
}

ABIArgInfo X86_64ABIInfo::getIndirectReturnResult(QualType Ty) const {
  // If this is a scalar LLVM value then assume LLVM will pass it in the right
  // place naturally.
  if (!isAggregateTypeForABI(Ty)) {
    // Treat an enum type as its underlying type.
    if (const EnumType *EnumTy = Ty->getAs<EnumType>())
      Ty = EnumTy->getDecl()->getIntegerType();

    if (Ty->isBitIntType())
      return getNaturalAlignIndirect(Ty);

    return (isPromotableIntegerTypeForABI(Ty) ? ABIArgInfo::getExtend(Ty)
                                              : ABIArgInfo::getDirect());
  }

  return getNaturalAlignIndirect(Ty);
}

bool X86_64ABIInfo::IsIllegalVectorType(QualType Ty) const {
  if (const VectorType *VecTy = Ty->getAs<VectorType>()) {
    uint64_t Size = getContext().getTypeSize(VecTy);
    unsigned LargestVector = getNativeVectorSizeForAVXABI(AVXLevel);
    if (Size <= 64 || Size > LargestVector)
      return true;
    QualType EltTy = VecTy->getElementType();
    if (passInt128VectorsInMem() &&
        (EltTy->isSpecificBuiltinType(BuiltinType::Int128) ||
         EltTy->isSpecificBuiltinType(BuiltinType::UInt128)))
      return true;
  }

  return false;
}
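// Illustrative note (ours): with AVXLevel == None the largest "legal" vector
// here is 128 bits, so a 256-bit __m256 argument is classified as illegal and
// passed in memory, while the same argument compiled with -mavx stays in a
// YMM register.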
ABIArgInfo X86_64ABIInfo::getIndirectResult(QualType Ty,
                                            unsigned freeIntRegs) const {
  // If this is a scalar LLVM value then assume LLVM will pass it in the right
  // place naturally.
  //
  // This assumption is optimistic, as there could be free registers available
  // when we need to pass this argument in memory, and LLVM could try to pass
  // the argument in the free register. This does not seem to happen currently,
  // but this code would be much safer if we could mark the argument with
  // 'onstack'. See PR12193.
  if (!isAggregateTypeForABI(Ty) && !IsIllegalVectorType(Ty) &&
      !Ty->isBitIntType()) {
    // Treat an enum type as its underlying type.
    if (const EnumType *EnumTy = Ty->getAs<EnumType>())
      Ty = EnumTy->getDecl()->getIntegerType();

    return (isPromotableIntegerTypeForABI(Ty) ? ABIArgInfo::getExtend(Ty)
                                              : ABIArgInfo::getDirect());
  }

  if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, getCXXABI()))
    return getNaturalAlignIndirect(Ty, RAA == CGCXXABI::RAA_DirectInMemory);

  // Compute the byval alignment. We specify the alignment of the byval in all
  // cases so that the mid-level optimizer knows the alignment of the byval.
  unsigned Align = std::max(getContext().getTypeAlign(Ty) / 8, 8U);

  // Attempt to avoid passing indirect results using byval when possible. This
  // is important for good codegen.
  //
  // We do this by coercing the value into a scalar type which the backend can
  // handle naturally (i.e., without using byval).
  //
  // For simplicity, we currently only do this when we have exhausted all of
  // the free integer registers. Doing this when there are free integer
  // registers would require more care, as we would have to ensure that the
  // coerced value did not claim the unused register. That would require either
  // reordering the arguments to the function (so that any subsequent inreg
  // values came first), or only doing this optimization when there were no
  // following arguments that might be inreg.
  //
  // We currently expect it to be rare (particularly in well written code) for
  // arguments to be passed on the stack when there are still free integer
  // registers available (this would typically imply large structs being passed
  // by value), so this seems like a fair tradeoff for now.
  //
  // We can revisit this if the backend grows support for 'onstack' parameter
  // attributes. See PR12193.
  if (freeIntRegs == 0) {
    uint64_t Size = getContext().getTypeSize(Ty);

    // If this type fits in an eightbyte, coerce it into the matching integral
    // type, which will end up on the stack (with alignment 8).
    if (Align == 8 && Size <= 64)
      return ABIArgInfo::getDirect(
          llvm::IntegerType::get(getVMContext(), Size));
  }

  return ABIArgInfo::getIndirect(CharUnits::fromQuantity(Align));
}

/// The ABI specifies that a value should be passed in a full vector XMM/YMM
/// register. Pick an LLVM IR type that will be passed as a vector register.
llvm::Type *X86_64ABIInfo::GetByteVectorType(QualType Ty) const {
  // Wrapper structs/arrays that only contain vectors are passed just like
  // vectors; strip them off if present.
  if (const Type *InnerTy = isSingleElementStruct(Ty, getContext()))
    Ty = QualType(InnerTy, 0);

  llvm::Type *IRType = CGT.ConvertType(Ty);
  if (isa<llvm::VectorType>(IRType)) {
    // Don't pass vXi128 vectors in their native type, the backend can't
    // legalize them.
    if (passInt128VectorsInMem() &&
        cast<llvm::VectorType>(IRType)->getElementType()->isIntegerTy(128)) {
      // Use a vXi64 vector.
      uint64_t Size = getContext().getTypeSize(Ty);
      return llvm::FixedVectorType::get(llvm::Type::getInt64Ty(getVMContext()),
                                        Size / 64);
    }

    return IRType;
  }

  if (IRType->getTypeID() == llvm::Type::FP128TyID)
    return IRType;
  // We couldn't find the preferred IR vector type for 'Ty'.
  uint64_t Size = getContext().getTypeSize(Ty);
  assert((Size == 128 || Size == 256 || Size == 512) && "Invalid type found!");

  // Return a LLVM IR vector type based on the size of 'Ty'.
  return llvm::FixedVectorType::get(llvm::Type::getDoubleTy(getVMContext()),
                                    Size / 64);
}

/// BitsContainNoUserData - Return true if the specified [start,end) bit range
/// is known to either be off the end of the specified type or being in
/// alignment padding. The user type specified is known to be at most 128 bits
/// in size, and have passed through X86_64ABIInfo::classify with a successful
/// classification that put one of the two halves in the INTEGER class.
///
/// It is conservatively correct to return false.
static bool BitsContainNoUserData(QualType Ty, unsigned StartBit,
                                  unsigned EndBit, ASTContext &Context) {
  // If the bytes being queried are off the end of the type, there is no user
  // data hiding here. This handles analysis of builtins, vectors and other
  // types that don't contain interesting padding.
  unsigned TySize = (unsigned)Context.getTypeSize(Ty);
  if (TySize <= StartBit)
    return true;

  if (const ConstantArrayType *AT = Context.getAsConstantArrayType(Ty)) {
    unsigned EltSize = (unsigned)Context.getTypeSize(AT->getElementType());
    unsigned NumElts = (unsigned)AT->getSize().getZExtValue();

    // Check each element to see if the element overlaps with the queried
    // range.
    for (unsigned i = 0; i != NumElts; ++i) {
      // If the element is after the span we care about, then we're done.
      unsigned EltOffset = i * EltSize;
      if (EltOffset >= EndBit)
        break;

      unsigned EltStart = EltOffset < StartBit ? StartBit - EltOffset : 0;
      if (!BitsContainNoUserData(AT->getElementType(), EltStart,
                                 EndBit - EltOffset, Context))
        return false;
    }
    // If it overlaps no elements, then it is safe to process as padding.
    return true;
  }

  if (const RecordType *RT = Ty->getAs<RecordType>()) {
    const RecordDecl *RD = RT->getDecl();
    const ASTRecordLayout &Layout = Context.getASTRecordLayout(RD);

    // If this is a C++ record, check the bases first.
    if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD)) {
      for (const auto &I : CXXRD->bases()) {
        assert(!I.isVirtual() && !I.getType()->isDependentType() &&
               "Unexpected base class!");
        const auto *Base =
            cast<CXXRecordDecl>(I.getType()->castAs<RecordType>()->getDecl());

        // If the base is after the span we care about, ignore it.
        unsigned BaseOffset = Context.toBits(Layout.getBaseClassOffset(Base));
        if (BaseOffset >= EndBit)
          continue;

        unsigned BaseStart = BaseOffset < StartBit ? StartBit - BaseOffset : 0;
        if (!BitsContainNoUserData(I.getType(), BaseStart, EndBit - BaseOffset,
                                   Context))
          return false;
      }
    }

    // Verify that no field has data that overlaps the region of interest. Yes
    // this could be sped up a lot by being smarter about queried fields,
    // however we're only looking at structs up to 16 bytes, so we don't care
    // much.
    unsigned idx = 0;
    for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
         i != e; ++i, ++idx) {
      unsigned FieldOffset = (unsigned)Layout.getFieldOffset(idx);

      // If we found a field after the region we care about, then we're done.
      if (FieldOffset >= EndBit)
        break;

      unsigned FieldStart = FieldOffset < StartBit ? StartBit - FieldOffset : 0;
      if (!BitsContainNoUserData(i->getType(), FieldStart,
                                 EndBit - FieldOffset, Context))
        return false;
    }

    // If nothing in this record overlapped the area of interest, then we're
    // clean.
    return true;
  }

  return false;
}
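// Worked example (ours): for 'struct S { double d; int i; };',
// BitsContainNoUserData(S, 96, 128, Ctx) returns true because bits [96, 128)
// hold only tail padding; GetINTEGERTypeAtOffset below relies on exactly this
// query when it considers using the i32 field for the high eightbyte.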
/// getFPTypeAtOffset - Return a floating point type at the specified offset.
static llvm::Type *getFPTypeAtOffset(llvm::Type *IRType, unsigned IROffset,
                                     const llvm::DataLayout &TD) {
  if (IROffset == 0 && IRType->isFloatingPointTy())
    return IRType;

  // If this is a struct, recurse into the field at the specified offset.
  if (llvm::StructType *STy = dyn_cast<llvm::StructType>(IRType)) {
    if (!STy->getNumContainedTypes())
      return nullptr;

    const llvm::StructLayout *SL = TD.getStructLayout(STy);
    unsigned Elt = SL->getElementContainingOffset(IROffset);
    IROffset -= SL->getElementOffset(Elt);
    return getFPTypeAtOffset(STy->getElementType(Elt), IROffset, TD);
  }

  // If this is an array, recurse into the element at the specified offset.
  if (llvm::ArrayType *ATy = dyn_cast<llvm::ArrayType>(IRType)) {
    llvm::Type *EltTy = ATy->getElementType();
    unsigned EltSize = TD.getTypeAllocSize(EltTy);
    IROffset -= IROffset / EltSize * EltSize;
    return getFPTypeAtOffset(EltTy, IROffset, TD);
  }

  return nullptr;
}

/// GetSSETypeAtOffset - Return a type that will be passed by the backend in
/// the low 8 bytes of an XMM register, corresponding to the SSE class.
llvm::Type *X86_64ABIInfo::GetSSETypeAtOffset(llvm::Type *IRType,
                                              unsigned IROffset,
                                              QualType SourceTy,
                                              unsigned SourceOffset) const {
  const llvm::DataLayout &TD = getDataLayout();
  unsigned SourceSize =
      (unsigned)getContext().getTypeSize(SourceTy) / 8 - SourceOffset;
  llvm::Type *T0 = getFPTypeAtOffset(IRType, IROffset, TD);
  if (!T0 || T0->isDoubleTy())
    return llvm::Type::getDoubleTy(getVMContext());

  // Get the adjacent FP type.
  llvm::Type *T1 = nullptr;
  unsigned T0Size = TD.getTypeAllocSize(T0);
  if (SourceSize > T0Size)
    T1 = getFPTypeAtOffset(IRType, IROffset + T0Size, TD);
  if (T1 == nullptr) {
    // Check if IRType is a half + float. float type will be in IROffset+4 due
    // to its alignment.
    if (T0->isHalfTy() && SourceSize > 4)
      T1 = getFPTypeAtOffset(IRType, IROffset + 4, TD);
    // If we can't get a second FP type, return a simple half or float.
    // avx512fp16-abi.c:pr51813_2 shows it works to return float for
    // {float, i8} too.
    if (T1 == nullptr)
      return T0;
  }

  if (T0->isFloatTy() && T1->isFloatTy())
    return llvm::FixedVectorType::get(T0, 2);

  if (T0->isHalfTy() && T1->isHalfTy()) {
    llvm::Type *T2 = nullptr;
    if (SourceSize > 4)
      T2 = getFPTypeAtOffset(IRType, IROffset + 4, TD);
    if (T2 == nullptr)
      return llvm::FixedVectorType::get(T0, 2);
    return llvm::FixedVectorType::get(T0, 4);
  }

  if (T0->isHalfTy() || T1->isHalfTy())
    return llvm::FixedVectorType::get(llvm::Type::getHalfTy(getVMContext()), 4);

  return llvm::Type::getDoubleTy(getVMContext());
}
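// Illustrative examples (ours): for 'struct { float a, b; }' the two floats
// share an eightbyte, so GetSSETypeAtOffset returns <2 x float>; for
// 'struct { _Float16 a, b, c; }' the half pair plus a third half at byte
// offset 4 yields <4 x half>.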
/// GetINTEGERTypeAtOffset - The ABI specifies that a value should be passed in
/// an 8-byte GPR. This means that we either have a scalar or we are talking
/// about the high or low part of an up-to-16-byte struct. This routine picks
/// the best LLVM IR type to represent this, which may be i64 or may be
/// anything else that the backend will pass in a GPR that works better (e.g.
/// i8, %foo*, etc.).
///
/// PrefType is an LLVM IR type that corresponds to (part of) the IR type for
/// the source type. IROffset is an offset in bytes into the LLVM IR type that
/// the 8-byte value references. PrefType may be null.
///
/// SourceTy is the source-level type for the entire argument. SourceOffset is
/// an offset into this that we're processing (which is always either 0 or 8).
llvm::Type *X86_64ABIInfo::GetINTEGERTypeAtOffset(llvm::Type *IRType,
                                                  unsigned IROffset,
                                                  QualType SourceTy,
                                                  unsigned SourceOffset) const {
  // If we're dealing with an un-offset LLVM IR type, then it means that we're
  // returning an 8-byte unit starting with it. See if we can safely use it.
  if (IROffset == 0) {
    // Pointers and int64's always fill the 8-byte unit.
    if ((isa<llvm::PointerType>(IRType) && Has64BitPointers) ||
        IRType->isIntegerTy(64))
      return IRType;

    // If we have a 1/2/4-byte integer, we can use it only if the rest of the
    // goodness in the source type is just tail padding. This is allowed to
    // kick in for struct {double,int} on the int, but not on
    // struct{double,int,int} because we wouldn't return the second int. We
    // have to do this analysis on the source type because we can't depend on
    // unions being lowered a specific way etc.
    if (IRType->isIntegerTy(8) || IRType->isIntegerTy(16) ||
        IRType->isIntegerTy(32) ||
        (isa<llvm::PointerType>(IRType) && !Has64BitPointers)) {
      unsigned BitWidth = isa<llvm::PointerType>(IRType)
                              ? 32
                              : cast<llvm::IntegerType>(IRType)->getBitWidth();

      if (BitsContainNoUserData(SourceTy, SourceOffset * 8 + BitWidth,
                                SourceOffset * 8 + 64, getContext()))
        return IRType;
    }
  }

  if (llvm::StructType *STy = dyn_cast<llvm::StructType>(IRType)) {
    // If this is a struct, recurse into the field at the specified offset.
    const llvm::StructLayout *SL = getDataLayout().getStructLayout(STy);
    if (IROffset < SL->getSizeInBytes()) {
      unsigned FieldIdx = SL->getElementContainingOffset(IROffset);
      IROffset -= SL->getElementOffset(FieldIdx);

      return GetINTEGERTypeAtOffset(STy->getElementType(FieldIdx), IROffset,
                                    SourceTy, SourceOffset);
    }
  }

  if (llvm::ArrayType *ATy = dyn_cast<llvm::ArrayType>(IRType)) {
    llvm::Type *EltTy = ATy->getElementType();
    unsigned EltSize = getDataLayout().getTypeAllocSize(EltTy);
    unsigned EltOffset = IROffset / EltSize * EltSize;
    return GetINTEGERTypeAtOffset(EltTy, IROffset - EltOffset, SourceTy,
                                  SourceOffset);
  }

  // Okay, we don't have any better idea of what to pass, so we pass this in an
  // integer register that isn't too big to fit the rest of the struct.
  unsigned TySizeInBytes =
      (unsigned)getContext().getTypeSizeInChars(SourceTy).getQuantity();

  assert(TySizeInBytes != SourceOffset && "Empty field?");

  // It is always safe to classify this as an integer type up to i64 that
  // isn't larger than the structure.
  return llvm::IntegerType::get(getVMContext(),
                                std::min(TySizeInBytes - SourceOffset, 8U) * 8);
}
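// Worked example (ours): for 'struct { char c; int i; }' (IR type {i8, i32}),
// the i8 at IROffset 0 cannot be used because bits [8, 64) still contain the
// i32, so the recursion falls through to the final case and the eightbyte is
// passed as i64.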
/// GetX86_64ByValArgumentPair - Given a high and low type that can ideally
/// be used as elements of a two register pair to pass or return, return a
/// first class aggregate to represent them. For example, if the low part of
/// a by-value argument should be passed as i32* and the high part as float,
/// return {i32*, float}.
static llvm::Type *GetX86_64ByValArgumentPair(llvm::Type *Lo, llvm::Type *Hi,
                                              const llvm::DataLayout &TD) {
  // In order to correctly satisfy the ABI, we need the high part to start
  // at offset 8. If the high and low parts we inferred are both 4-byte types
  // (e.g. i32 and i32) then the resultant struct type ({i32,i32}) won't have
  // the second element at offset 8. Check for this:
  unsigned LoSize = (unsigned)TD.getTypeAllocSize(Lo);
  unsigned HiAlign = TD.getABITypeAlignment(Hi);
  unsigned HiStart = llvm::alignTo(LoSize, HiAlign);
  assert(HiStart != 0 && HiStart <= 8 && "Invalid x86-64 argument pair!");

  // To handle this, we have to increase the size of the low part so that the
  // second element will start at an 8 byte offset. We can't increase the size
  // of the second element because it might make us access off the end of the
  // struct.
  if (HiStart != 8) {
    // There are usually two sorts of types the ABI generation code can produce
    // for the low part of a pair that aren't 8 bytes in size: half, float or
    // i8/i16/i32. This can also include pointers when they are 32-bit (X32 and
    // NaCl).
    // Promote these to a larger type.
    if (Lo->isHalfTy() || Lo->isFloatTy())
      Lo = llvm::Type::getDoubleTy(Lo->getContext());
    else {
      assert((Lo->isIntegerTy() || Lo->isPointerTy()) &&
             "Invalid/unknown lo type");
      Lo = llvm::Type::getInt64Ty(Lo->getContext());
    }
  }

  llvm::StructType *Result = llvm::StructType::get(Lo, Hi);

  // Verify that the second element is at an 8-byte offset.
  assert(TD.getStructLayout(Result)->getElementOffset(1) == 8 &&
         "Invalid x86-64 argument pair!");
  return Result;
}
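// Illustrative note (ours): if Lo were inferred as float and Hi as float
// (possible with over-aligned fields), a plain {float, float} would place the
// second element at offset 4; promoting Lo to double yields {double, float}
// and puts the high part at offset 8 as required.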
ABIArgInfo X86_64ABIInfo::classifyReturnType(QualType RetTy) const {
  // AMD64-ABI 3.2.3p4: Rule 1. Classify the return type with the
  // classification algorithm.
  X86_64ABIInfo::Class Lo, Hi;
  classify(RetTy, 0, Lo, Hi, /*isNamedArg*/ true);

  // Check some invariants.
  assert((Hi != Memory || Lo == Memory) && "Invalid memory classification.");
  assert((Hi != SSEUp || Lo == SSE) && "Invalid SSEUp classification.");

  llvm::Type *ResType = nullptr;
  switch (Lo) {
  case NoClass:
    if (Hi == NoClass)
      return ABIArgInfo::getIgnore();
    // If the low part is just padding, it takes no register, leave ResType
    // null.
    assert((Hi == SSE || Hi == Integer || Hi == X87Up) &&
           "Unknown missing lo part");
    break;

  case SSEUp:
  case X87Up:
    llvm_unreachable("Invalid classification for lo word.");

  // AMD64-ABI 3.2.3p4: Rule 2. Types of class memory are returned via
  // hidden argument.
  case Memory:
    return getIndirectReturnResult(RetTy);

  // AMD64-ABI 3.2.3p4: Rule 3. If the class is INTEGER, the next
  // available register of the sequence %rax, %rdx is used.
  case Integer:
    ResType = GetINTEGERTypeAtOffset(CGT.ConvertType(RetTy), 0, RetTy, 0);

    // If we have a sign or zero extended integer, make sure to return Extend
    // so that the parameter gets the right LLVM IR attributes.
    if (Hi == NoClass && isa<llvm::IntegerType>(ResType)) {
      // Treat an enum type as its underlying type.
      if (const EnumType *EnumTy = RetTy->getAs<EnumType>())
        RetTy = EnumTy->getDecl()->getIntegerType();

      if (RetTy->isIntegralOrEnumerationType() &&
          isPromotableIntegerTypeForABI(RetTy))
        return ABIArgInfo::getExtend(RetTy);
    }
    break;

  // AMD64-ABI 3.2.3p4: Rule 4. If the class is SSE, the next
  // available SSE register of the sequence %xmm0, %xmm1 is used.
  case SSE:
    ResType = GetSSETypeAtOffset(CGT.ConvertType(RetTy), 0, RetTy, 0);
    break;

  // AMD64-ABI 3.2.3p4: Rule 6. If the class is X87, the value is
  // returned on the X87 stack in %st0 as an 80-bit x87 number.
  case X87:
    ResType = llvm::Type::getX86_FP80Ty(getVMContext());
    break;

  // AMD64-ABI 3.2.3p4: Rule 8. If the class is COMPLEX_X87, the real
  // part of the value is returned in %st0 and the imaginary part in
  // %st1.
  case ComplexX87:
    assert(Hi == ComplexX87 && "Unexpected ComplexX87 classification.");
    ResType = llvm::StructType::get(llvm::Type::getX86_FP80Ty(getVMContext()),
                                    llvm::Type::getX86_FP80Ty(getVMContext()));
    break;
  }

  llvm::Type *HighPart = nullptr;
  switch (Hi) {
  // Memory was handled previously and X87 should
  // never occur as a hi class.
  case Memory:
  case X87:
    llvm_unreachable("Invalid classification for hi word.");

  case ComplexX87: // Previously handled.
  case NoClass:
    break;

  case Integer:
    HighPart = GetINTEGERTypeAtOffset(CGT.ConvertType(RetTy), 8, RetTy, 8);
    if (Lo == NoClass) // Return HighPart at offset 8 in memory.
      return ABIArgInfo::getDirect(HighPart, 8);
    break;
  case SSE:
    HighPart = GetSSETypeAtOffset(CGT.ConvertType(RetTy), 8, RetTy, 8);
    if (Lo == NoClass) // Return HighPart at offset 8 in memory.
      return ABIArgInfo::getDirect(HighPart, 8);
    break;

  // AMD64-ABI 3.2.3p4: Rule 5. If the class is SSEUP, the eightbyte
  // is passed in the next available eightbyte chunk of the last used
  // vector register.
  //
  // SSEUP should always be preceded by SSE, just widen.
  case SSEUp:
    assert(Lo == SSE && "Unexpected SSEUp classification.");
    ResType = GetByteVectorType(RetTy);
    break;

  // AMD64-ABI 3.2.3p4: Rule 7. If the class is X87UP, the value is
  // returned together with the previous X87 value in %st0.
  case X87Up:
    // If X87Up is preceded by X87, we don't need to do
    // anything. However, in some cases with unions it may not be
    // preceded by X87. In such situations we follow gcc and pass the
    // extra bits in an SSE reg.
    if (Lo != X87) {
      HighPart = GetSSETypeAtOffset(CGT.ConvertType(RetTy), 8, RetTy, 8);
      if (Lo == NoClass) // Return HighPart at offset 8 in memory.
        return ABIArgInfo::getDirect(HighPart, 8);
    }
    break;
  }
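  // Worked return example (ours): 'struct { double d; int i; }' classifies as
  // Lo = SSE, Hi = Integer, so ResType becomes {double, i32} below and the
  // value comes back in %xmm0 and %rax.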
  // If a high part was specified, merge it together with the low part. It is
  // known to pass in the high eightbyte of the result. We do this by forming a
  // first class struct aggregate with the high and low part: {low, high}
  if (HighPart)
    ResType = GetX86_64ByValArgumentPair(ResType, HighPart, getDataLayout());

  return ABIArgInfo::getDirect(ResType);
}
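// Illustrative note (ours): the same classification drives argument passing
// in classifyArgumentType below; e.g. passing 'struct { double d; int i; }'
// by value consumes one SSE register and one GPR (neededSSE = 1,
// neededInt = 1).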
ABIArgInfo
X86_64ABIInfo::classifyArgumentType(QualType Ty, unsigned freeIntRegs,
                                    unsigned &neededInt, unsigned &neededSSE,
                                    bool isNamedArg, bool IsRegCall) const {
  Ty = useFirstFieldIfTransparentUnion(Ty);

  X86_64ABIInfo::Class Lo, Hi;
  classify(Ty, 0, Lo, Hi, isNamedArg, IsRegCall);

  // Check some invariants.
  // FIXME: Enforce these by construction.
  assert((Hi != Memory || Lo == Memory) && "Invalid memory classification.");
  assert((Hi != SSEUp || Lo == SSE) && "Invalid SSEUp classification.");

  neededInt = 0;
  neededSSE = 0;
  llvm::Type *ResType = nullptr;
  switch (Lo) {
  case NoClass:
    if (Hi == NoClass)
      return ABIArgInfo::getIgnore();
    // If the low part is just padding, it takes no register, leave ResType
    // null.
    assert((Hi == SSE || Hi == Integer || Hi == X87Up) &&
           "Unknown missing lo part");
    break;

  // AMD64-ABI 3.2.3p3: Rule 1. If the class is MEMORY, pass the argument
  // on the stack.
  case Memory:

  // AMD64-ABI 3.2.3p3: Rule 5. If the class is X87, X87UP or
  // COMPLEX_X87, it is passed in memory.
  case X87:
  case ComplexX87:
    if (getRecordArgABI(Ty, getCXXABI()) == CGCXXABI::RAA_Indirect)
      ++neededInt;
    return getIndirectResult(Ty, freeIntRegs);

  case SSEUp:
  case X87Up:
    llvm_unreachable("Invalid classification for lo word.");

  // AMD64-ABI 3.2.3p3: Rule 2. If the class is INTEGER, the next
  // available register of the sequence %rdi, %rsi, %rdx, %rcx, %r8
  // and %r9 is used.
  case Integer:
    ++neededInt;

    // Pick an 8-byte type based on the preferred type.
    ResType = GetINTEGERTypeAtOffset(CGT.ConvertType(Ty), 0, Ty, 0);

    // If we have a sign or zero extended integer, make sure to return Extend
    // so that the parameter gets the right LLVM IR attributes.
    if (Hi == NoClass && isa<llvm::IntegerType>(ResType)) {
      // Treat an enum type as its underlying type.
      if (const EnumType *EnumTy = Ty->getAs<EnumType>())
        Ty = EnumTy->getDecl()->getIntegerType();

      if (Ty->isIntegralOrEnumerationType() &&
          isPromotableIntegerTypeForABI(Ty))
        return ABIArgInfo::getExtend(Ty);
    }

    break;

  // AMD64-ABI 3.2.3p3: Rule 3. If the class is SSE, the next
  // available SSE register is used, the registers are taken in the
  // order from %xmm0 to %xmm7.
  case SSE: {
    llvm::Type *IRType = CGT.ConvertType(Ty);
    ResType = GetSSETypeAtOffset(IRType, 0, Ty, 0);
    ++neededSSE;
    break;
  }
  }

  llvm::Type *HighPart = nullptr;
  switch (Hi) {
  // Memory was handled previously, ComplexX87 and X87 should
  // never occur as hi classes, and X87Up must be preceded by X87,
  // which is passed in memory.
  case Memory:
  case X87:
  case ComplexX87:
    llvm_unreachable("Invalid classification for hi word.");

  case NoClass:
    break;

  case Integer:
    ++neededInt;
    // Pick an 8-byte type based on the preferred type.
    HighPart = GetINTEGERTypeAtOffset(CGT.ConvertType(Ty), 8, Ty, 8);

    if (Lo == NoClass) // Pass HighPart at offset 8 in memory.
      return ABIArgInfo::getDirect(HighPart, 8);
    break;

  // X87Up generally doesn't occur here (long double is passed in
  // memory), except in situations involving unions.
  case X87Up:
  case SSE:
    HighPart = GetSSETypeAtOffset(CGT.ConvertType(Ty), 8, Ty, 8);

    if (Lo == NoClass) // Pass HighPart at offset 8 in memory.
      return ABIArgInfo::getDirect(HighPart, 8);

    ++neededSSE;
    break;

  // AMD64-ABI 3.2.3p3: Rule 4. If the class is SSEUP, the
  // eightbyte is passed in the upper half of the last used SSE
  // register. This only happens when 128-bit vectors are passed.
  case SSEUp:
    assert(Lo == SSE && "Unexpected SSEUp classification");
    ResType = GetByteVectorType(Ty);
    break;
  }

  // If a high part was specified, merge it together with the low part. It is
  // known to pass in the high eightbyte of the result. We do this by forming a
  // first class struct aggregate with the high and low part: {low, high}
  if (HighPart)
    ResType = GetX86_64ByValArgumentPair(ResType, HighPart, getDataLayout());

  return ABIArgInfo::getDirect(ResType);
}

ABIArgInfo
X86_64ABIInfo::classifyRegCallStructTypeImpl(QualType Ty, unsigned &NeededInt,
                                             unsigned &NeededSSE,
                                             unsigned &MaxVectorWidth) const {
  auto RT = Ty->getAs<RecordType>();
  assert(RT && "classifyRegCallStructType only valid with struct types");

  if (RT->getDecl()->hasFlexibleArrayMember())
    return getIndirectReturnResult(Ty);

  // Sum up bases
  if (auto CXXRD = dyn_cast<CXXRecordDecl>(RT->getDecl())) {
    if (CXXRD->isDynamicClass()) {
      NeededInt = NeededSSE = 0;
      return getIndirectReturnResult(Ty);
    }

    for (const auto &I : CXXRD->bases())
      if (classifyRegCallStructTypeImpl(I.getType(), NeededInt, NeededSSE,
                                        MaxVectorWidth)
              .isIndirect()) {
        NeededInt = NeededSSE = 0;
        return getIndirectReturnResult(Ty);
      }
  }

  // Sum up members
  for (const auto *FD : RT->getDecl()->fields()) {
    QualType MTy = FD->getType();
    if (MTy->isRecordType() && !MTy->isUnionType()) {
      if (classifyRegCallStructTypeImpl(MTy, NeededInt, NeededSSE,
                                        MaxVectorWidth)
              .isIndirect()) {
        NeededInt = NeededSSE = 0;
        return getIndirectReturnResult(Ty);
      }
    } else {
      unsigned LocalNeededInt, LocalNeededSSE;
      if (classifyArgumentType(MTy, UINT_MAX, LocalNeededInt, LocalNeededSSE,
                               true, true)
              .isIndirect()) {
        NeededInt = NeededSSE = 0;
        return getIndirectReturnResult(Ty);
      }
      if (const auto *AT = getContext().getAsConstantArrayType(MTy))
        MTy = AT->getElementType();
      if (const auto *VT = MTy->getAs<VectorType>())
        if (getContext().getTypeSize(VT) > MaxVectorWidth)
          MaxVectorWidth = getContext().getTypeSize(VT);
      NeededInt += LocalNeededInt;
      NeededSSE += LocalNeededSSE;
    }
  }

  return ABIArgInfo::getDirect();
}

ABIArgInfo
X86_64ABIInfo::classifyRegCallStructType(QualType Ty, unsigned &NeededInt,
                                         unsigned &NeededSSE,
                                         unsigned &MaxVectorWidth) const {
  NeededInt = 0;
  NeededSSE = 0;
  MaxVectorWidth = 0;

  return classifyRegCallStructTypeImpl(Ty, NeededInt, NeededSSE,
                                       MaxVectorWidth);
}
void X86_64ABIInfo::computeInfo(CGFunctionInfo &FI) const {
  const unsigned CallingConv = FI.getCallingConvention();
  // It is possible to force the Win64 calling convention on any x86_64 target
  // by using __attribute__((ms_abi)). In that case, delegate to
  // WinX86_64ABIInfo::computeInfo so that Win64-compatible code is emitted
  // correctly.
  if (CallingConv == llvm::CallingConv::Win64) {
    WinX86_64ABIInfo Win64ABIInfo(CGT, AVXLevel);
    Win64ABIInfo.computeInfo(FI);
    return;
  }

  bool IsRegCall = CallingConv == llvm::CallingConv::X86_RegCall;

  // Keep track of the number of assigned registers.
  unsigned FreeIntRegs = IsRegCall ? 11 : 6;
  unsigned FreeSSERegs = IsRegCall ? 16 : 8;
  unsigned NeededInt = 0, NeededSSE = 0, MaxVectorWidth = 0;

  if (!::classifyReturnType(getCXXABI(), FI, *this)) {
    if (IsRegCall && FI.getReturnType()->getTypePtr()->isRecordType() &&
        !FI.getReturnType()->getTypePtr()->isUnionType()) {
      FI.getReturnInfo() = classifyRegCallStructType(
          FI.getReturnType(), NeededInt, NeededSSE, MaxVectorWidth);
      if (FreeIntRegs >= NeededInt && FreeSSERegs >= NeededSSE) {
        FreeIntRegs -= NeededInt;
        FreeSSERegs -= NeededSSE;
      } else {
        FI.getReturnInfo() = getIndirectReturnResult(FI.getReturnType());
      }
    } else if (IsRegCall && FI.getReturnType()->getAs<ComplexType>() &&
               getContext().getCanonicalType(FI.getReturnType()
                                                 ->getAs<ComplexType>()
                                                 ->getElementType()) ==
                   getContext().LongDoubleTy)
      // A complex long double is returned in memory when the regcall
      // calling convention is used.
      FI.getReturnInfo() = getIndirectReturnResult(FI.getReturnType());
    else
      FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
  }

  // If the return value is indirect, then the hidden argument is consuming one
  // integer register.
  if (FI.getReturnInfo().isIndirect())
    --FreeIntRegs;
  else if (NeededSSE && MaxVectorWidth > 0)
    FI.setMaxVectorWidth(MaxVectorWidth);

  // The chain argument effectively gives us another free register.
  if (FI.isChainCall())
    ++FreeIntRegs;

  unsigned NumRequiredArgs = FI.getNumRequiredArgs();
  // AMD64-ABI 3.2.3p3: Once arguments are classified, the registers
  // get assigned (in left-to-right order) for passing as follows...
  unsigned ArgNo = 0;
  for (CGFunctionInfo::arg_iterator it = FI.arg_begin(), ie = FI.arg_end();
       it != ie; ++it, ++ArgNo) {
    bool IsNamedArg = ArgNo < NumRequiredArgs;

    if (IsRegCall && it->type->isStructureOrClassType())
      it->info = classifyRegCallStructType(it->type, NeededInt, NeededSSE,
                                           MaxVectorWidth);
    else
      it->info = classifyArgumentType(it->type, FreeIntRegs, NeededInt,
                                      NeededSSE, IsNamedArg);

    // AMD64-ABI 3.2.3p3: If there are no registers available for any
    // eightbyte of an argument, the whole argument is passed on the
    // stack. If registers have already been assigned for some
    // eightbytes of such an argument, the assignments get reverted.
    if (FreeIntRegs >= NeededInt && FreeSSERegs >= NeededSSE) {
      FreeIntRegs -= NeededInt;
      FreeSSERegs -= NeededSSE;
      if (MaxVectorWidth > FI.getMaxVectorWidth())
        FI.setMaxVectorWidth(MaxVectorWidth);
    } else {
      it->info = getIndirectResult(it->type, FreeIntRegs);
    }
  }
}
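// Illustrative note (ours): for a SysV call such as
// f(int, double, struct { double d; int i; }), the loop above assigns %rdi to
// the int, %xmm0 to the double, and %xmm1 plus %rsi to the struct's two
// eightbytes, leaving FreeIntRegs == 4 and FreeSSERegs == 6.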
static Address EmitX86_64VAArgFromMemory(CodeGenFunction &CGF,
                                         Address VAListAddr, QualType Ty) {
  Address overflow_arg_area_p =
      CGF.Builder.CreateStructGEP(VAListAddr, 2, "overflow_arg_area_p");
  llvm::Value *overflow_arg_area =
      CGF.Builder.CreateLoad(overflow_arg_area_p, "overflow_arg_area");

  // AMD64-ABI 3.5.7p5: Step 7. Align l->overflow_arg_area upwards to a 16
  // byte boundary if alignment needed by type exceeds 8 byte boundary.
  // It isn't stated explicitly in the standard, but in practice we use
  // alignment greater than 16 where necessary.
  CharUnits Align = CGF.getContext().getTypeAlignInChars(Ty);
  if (Align > CharUnits::fromQuantity(8)) {
    overflow_arg_area =
        emitRoundPointerUpToAlignment(CGF, overflow_arg_area, Align);
  }

  // AMD64-ABI 3.5.7p5: Step 8. Fetch type from l->overflow_arg_area.
  llvm::Type *LTy = CGF.ConvertTypeForMem(Ty);
  llvm::Value *Res = CGF.Builder.CreateBitCast(
      overflow_arg_area, llvm::PointerType::getUnqual(LTy));

  // AMD64-ABI 3.5.7p5: Step 9. Set l->overflow_arg_area to:
  // l->overflow_arg_area + sizeof(type).
  // AMD64-ABI 3.5.7p5: Step 10. Align l->overflow_arg_area upwards to
  // an 8 byte boundary.
  uint64_t SizeInBytes = (CGF.getContext().getTypeSize(Ty) + 7) / 8;
  llvm::Value *Offset =
      llvm::ConstantInt::get(CGF.Int32Ty, (SizeInBytes + 7) & ~7);
  overflow_arg_area = CGF.Builder.CreateGEP(CGF.Int8Ty, overflow_arg_area,
                                            Offset, "overflow_arg_area.next");
  CGF.Builder.CreateStore(overflow_arg_area, overflow_arg_area_p);

  // AMD64-ABI 3.5.7p5: Step 11. Return the fetched type.
  return Address(Res, LTy, Align);
}
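// Worked example (ours): fetching a 12-byte struct from the overflow area
// rounds its size up to 16 ((12 + 7) & ~7), so overflow_arg_area advances by
// 16 bytes and stays 8-byte aligned for the next argument.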
Address X86_64ABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
                                 QualType Ty) const {
  // Assume that va_list type is correct; should be pointer to LLVM type:
  // struct {
  //   i32 gp_offset;
  //   i32 fp_offset;
  //   i8* overflow_arg_area;
  //   i8* reg_save_area;
  // };
  unsigned neededInt, neededSSE;

  Ty = getContext().getCanonicalType(Ty);
  ABIArgInfo AI = classifyArgumentType(Ty, 0, neededInt, neededSSE,
                                       /*isNamedArg*/ false);

  // AMD64-ABI 3.5.7p5: Step 1. Determine whether type may be passed
  // in the registers. If not go to step 7.
  if (!neededInt && !neededSSE)
    return EmitX86_64VAArgFromMemory(CGF, VAListAddr, Ty);

  // AMD64-ABI 3.5.7p5: Step 2. Compute num_gp to hold the number of
  // general purpose registers needed to pass type and num_fp to hold
  // the number of floating point registers needed.

  // AMD64-ABI 3.5.7p5: Step 3. Verify whether arguments fit into
  // registers. In the case: l->gp_offset > 48 - num_gp * 8 or
  // l->fp_offset > 304 - num_fp * 16 go to step 7.
  //
  // NOTE: 304 is a typo in the ABI document; there are only
  // (6 * 8 + 8 * 16) = 176 bytes of register save space.
  llvm::Value *InRegs = nullptr;
  Address gp_offset_p = Address::invalid(), fp_offset_p = Address::invalid();
  llvm::Value *gp_offset = nullptr, *fp_offset = nullptr;
  if (neededInt) {
    gp_offset_p = CGF.Builder.CreateStructGEP(VAListAddr, 0, "gp_offset_p");
    gp_offset = CGF.Builder.CreateLoad(gp_offset_p, "gp_offset");
    InRegs = llvm::ConstantInt::get(CGF.Int32Ty, 48 - neededInt * 8);
    InRegs = CGF.Builder.CreateICmpULE(gp_offset, InRegs, "fits_in_gp");
  }

  if (neededSSE) {
    fp_offset_p = CGF.Builder.CreateStructGEP(VAListAddr, 1, "fp_offset_p");
    fp_offset = CGF.Builder.CreateLoad(fp_offset_p, "fp_offset");
    llvm::Value *FitsInFP =
        llvm::ConstantInt::get(CGF.Int32Ty, 176 - neededSSE * 16);
    FitsInFP = CGF.Builder.CreateICmpULE(fp_offset, FitsInFP, "fits_in_fp");
    InRegs = InRegs ? CGF.Builder.CreateAnd(InRegs, FitsInFP) : FitsInFP;
  }
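  // Worked example (ours): for a type needing one GPR and one XMM register
  // (neededInt == 1, neededSSE == 1), the fast path requires
  // gp_offset <= 40 and fp_offset <= 160; otherwise control falls through to
  // the overflow-area path below.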
  llvm::BasicBlock *InRegBlock = CGF.createBasicBlock("vaarg.in_reg");
  llvm::BasicBlock *InMemBlock = CGF.createBasicBlock("vaarg.in_mem");
  llvm::BasicBlock *ContBlock = CGF.createBasicBlock("vaarg.end");
  CGF.Builder.CreateCondBr(InRegs, InRegBlock, InMemBlock);

  // Emit code to load the value if it was passed in registers.
  CGF.EmitBlock(InRegBlock);

  // AMD64-ABI 3.5.7p5: Step 4. Fetch type from l->reg_save_area with
  // an offset of l->gp_offset and/or l->fp_offset. This may require
  // copying to a temporary location in case the parameter is passed
  // in different register classes or requires an alignment greater
  // than 8 for general purpose registers and 16 for XMM registers.
  //
  // FIXME: This really results in shameful code when we end up needing to
  // collect arguments from different places; often what should result in a
  // simple assembling of a structure from scattered addresses has many more
  // loads than necessary. Can we clean this up?
  llvm::Type *LTy = CGF.ConvertTypeForMem(Ty);
  llvm::Value *RegSaveArea = CGF.Builder.CreateLoad(
      CGF.Builder.CreateStructGEP(VAListAddr, 3), "reg_save_area");

  Address RegAddr = Address::invalid();
  if (neededInt && neededSSE) {
    // FIXME: Cleanup.
    assert(AI.isDirect() && "Unexpected ABI info for mixed regs");
    llvm::StructType *ST = cast<llvm::StructType>(AI.getCoerceToType());
    Address Tmp = CGF.CreateMemTemp(Ty);
    Tmp = CGF.Builder.CreateElementBitCast(Tmp, ST);
    assert(ST->getNumElements() == 2 && "Unexpected ABI info for mixed regs");
    llvm::Type *TyLo = ST->getElementType(0);
    llvm::Type *TyHi = ST->getElementType(1);
    assert((TyLo->isFPOrFPVectorTy() ^ TyHi->isFPOrFPVectorTy()) &&
           "Unexpected ABI info for mixed regs");
    llvm::Type *PTyLo = llvm::PointerType::getUnqual(TyLo);
    llvm::Type *PTyHi = llvm::PointerType::getUnqual(TyHi);
    llvm::Value *GPAddr =
        CGF.Builder.CreateGEP(CGF.Int8Ty, RegSaveArea, gp_offset);
    llvm::Value *FPAddr =
        CGF.Builder.CreateGEP(CGF.Int8Ty, RegSaveArea, fp_offset);
    llvm::Value *RegLoAddr = TyLo->isFPOrFPVectorTy() ? FPAddr : GPAddr;
    llvm::Value *RegHiAddr = TyLo->isFPOrFPVectorTy() ? GPAddr : FPAddr;

    // Copy the first element.
    // FIXME: Our choice of alignment here and below is probably pessimistic.
    llvm::Value *V = CGF.Builder.CreateAlignedLoad(
        TyLo, CGF.Builder.CreateBitCast(RegLoAddr, PTyLo),
        CharUnits::fromQuantity(getDataLayout().getABITypeAlignment(TyLo)));
    CGF.Builder.CreateStore(V, CGF.Builder.CreateStructGEP(Tmp, 0));

    // Copy the second element.
    V = CGF.Builder.CreateAlignedLoad(
        TyHi, CGF.Builder.CreateBitCast(RegHiAddr, PTyHi),
        CharUnits::fromQuantity(getDataLayout().getABITypeAlignment(TyHi)));
    CGF.Builder.CreateStore(V, CGF.Builder.CreateStructGEP(Tmp, 1));

    RegAddr = CGF.Builder.CreateElementBitCast(Tmp, LTy);
  } else if (neededInt) {
    RegAddr =
        Address(CGF.Builder.CreateGEP(CGF.Int8Ty, RegSaveArea, gp_offset),
                CGF.Int8Ty, CharUnits::fromQuantity(8));
    RegAddr = CGF.Builder.CreateElementBitCast(RegAddr, LTy);

    // Copy to a temporary if necessary to ensure the appropriate alignment.
    auto TInfo = getContext().getTypeInfoInChars(Ty);
    uint64_t TySize = TInfo.Width.getQuantity();
    CharUnits TyAlign = TInfo.Align;

    // Copy into a temporary if the type is more aligned than the
    // register save area.
    if (TyAlign.getQuantity() > 8) {
      Address Tmp = CGF.CreateMemTemp(Ty);
      CGF.Builder.CreateMemCpy(Tmp, RegAddr, TySize, false);
      RegAddr = Tmp;
    }

  } else if (neededSSE == 1) {
    RegAddr =
        Address(CGF.Builder.CreateGEP(CGF.Int8Ty, RegSaveArea, fp_offset),
                CGF.Int8Ty, CharUnits::fromQuantity(16));
    RegAddr = CGF.Builder.CreateElementBitCast(RegAddr, LTy);
  } else {
    assert(neededSSE == 2 && "Invalid number of needed registers!");
    // SSE registers are spaced 16 bytes apart in the register save
    // area, so we need to collect the two eightbytes together.
    // The ABI isn't explicit about this, but it seems reasonable
    // to assume that the slots are 16-byte aligned, since the stack is
    // naturally 16-byte aligned and the prologue is expected to store
    // all the SSE registers to the RSA.
    Address RegAddrLo =
        Address(CGF.Builder.CreateGEP(CGF.Int8Ty, RegSaveArea, fp_offset),
                CGF.Int8Ty, CharUnits::fromQuantity(16));
    Address RegAddrHi = CGF.Builder.CreateConstInBoundsByteGEP(
        RegAddrLo, CharUnits::fromQuantity(16));
    llvm::Type *ST = AI.canHaveCoerceToType()
                         ? AI.getCoerceToType()
                         : llvm::StructType::get(CGF.DoubleTy, CGF.DoubleTy);
    llvm::Value *V;
    Address Tmp = CGF.CreateMemTemp(Ty);
    Tmp = CGF.Builder.CreateElementBitCast(Tmp, ST);
    V = CGF.Builder.CreateLoad(CGF.Builder.CreateElementBitCast(
        RegAddrLo, ST->getStructElementType(0)));
    CGF.Builder.CreateStore(V, CGF.Builder.CreateStructGEP(Tmp, 0));
    V = CGF.Builder.CreateLoad(CGF.Builder.CreateElementBitCast(
        RegAddrHi, ST->getStructElementType(1)));
    CGF.Builder.CreateStore(V, CGF.Builder.CreateStructGEP(Tmp, 1));

    RegAddr = CGF.Builder.CreateElementBitCast(Tmp, LTy);
  }

  // AMD64-ABI 3.5.7p5: Step 5. Set:
  // l->gp_offset = l->gp_offset + num_gp * 8
  // l->fp_offset = l->fp_offset + num_fp * 16.
  if (neededInt) {
    llvm::Value *Offset = llvm::ConstantInt::get(CGF.Int32Ty, neededInt * 8);
    CGF.Builder.CreateStore(CGF.Builder.CreateAdd(gp_offset, Offset),
                            gp_offset_p);
  }
  if (neededSSE) {
    llvm::Value *Offset = llvm::ConstantInt::get(CGF.Int32Ty, neededSSE * 16);
    CGF.Builder.CreateStore(CGF.Builder.CreateAdd(fp_offset, Offset),
                            fp_offset_p);
  }
  CGF.EmitBranch(ContBlock);

  // Emit code to load the value if it was passed in memory.
  CGF.EmitBlock(InMemBlock);
  Address MemAddr = EmitX86_64VAArgFromMemory(CGF, VAListAddr, Ty);

  // Return the appropriate result.
  CGF.EmitBlock(ContBlock);
  Address ResAddr = emitMergePHI(CGF, RegAddr, InRegBlock, MemAddr, InMemBlock,
                                 "vaarg.addr");
  return ResAddr;
}

Address X86_64ABIInfo::EmitMSVAArg(CodeGenFunction &CGF, Address VAListAddr,
                                   QualType Ty) const {
  // MS x64 ABI requirement: "Any argument that doesn't fit in 8 bytes, or is
  // not 1, 2, 4, or 8 bytes, must be passed by reference."
  uint64_t Width = getContext().getTypeSize(Ty);
  bool IsIndirect = Width > 64 || !llvm::isPowerOf2_64(Width);

  return emitVoidPtrVAArg(CGF, VAListAddr, Ty, IsIndirect,
                          CGF.getContext().getTypeInfoInChars(Ty),
                          CharUnits::fromQuantity(8),
                          /*allowHigherAlign*/ false);
}

ABIArgInfo WinX86_64ABIInfo::reclassifyHvaArgForVectorCall(
    QualType Ty, unsigned &FreeSSERegs, const ABIArgInfo &current) const {
  const Type *Base = nullptr;
  uint64_t NumElts = 0;

  if (!Ty->isBuiltinType() && !Ty->isVectorType() &&
      isHomogeneousAggregate(Ty, Base, NumElts) && FreeSSERegs >= NumElts) {
    FreeSSERegs -= NumElts;
    return getDirectX86Hva();
  }
  return current;
}
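// Illustrative example (ours): 'struct HVA4 { __m128 x, y, z, w; };' is a
// homogeneous vector aggregate of four XMM values. Under vectorcall the first
// pass in classify() below leaves it indirect; if four SSE registers are
// still free after all other arguments, the second pass above
// (reclassifyHvaArgForVectorCall) switches it to direct HVA passing.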
ABIArgInfo WinX86_64ABIInfo::classify(QualType Ty, unsigned &FreeSSERegs,
                                      bool IsReturnType, bool IsVectorCall,
                                      bool IsRegCall) const {
  if (Ty->isVoidType())
    return ABIArgInfo::getIgnore();

  if (const EnumType *EnumTy = Ty->getAs<EnumType>())
    Ty = EnumTy->getDecl()->getIntegerType();

  TypeInfo Info = getContext().getTypeInfo(Ty);
  uint64_t Width = Info.Width;
  CharUnits Align = getContext().toCharUnitsFromBits(Info.Align);

  const RecordType *RT = Ty->getAs<RecordType>();
  if (RT) {
    if (!IsReturnType) {
      if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(RT, getCXXABI()))
        return getNaturalAlignIndirect(Ty, RAA == CGCXXABI::RAA_DirectInMemory);
    }

    if (RT->getDecl()->hasFlexibleArrayMember())
      return getNaturalAlignIndirect(Ty, /*ByVal=*/false);
  }

  const Type *Base = nullptr;
  uint64_t NumElts = 0;
  // vectorcall adds the concept of a homogeneous vector aggregate, similar to
  // other targets.
  if ((IsVectorCall || IsRegCall) &&
      isHomogeneousAggregate(Ty, Base, NumElts)) {
    if (IsRegCall) {
      if (FreeSSERegs >= NumElts) {
        FreeSSERegs -= NumElts;
        if (IsReturnType || Ty->isBuiltinType() || Ty->isVectorType())
          return ABIArgInfo::getDirect();
        return ABIArgInfo::getExpand();
      }
      return ABIArgInfo::getIndirect(Align, /*ByVal=*/false);
    } else if (IsVectorCall) {
      if (FreeSSERegs >= NumElts &&
          (IsReturnType || Ty->isBuiltinType() || Ty->isVectorType())) {
        FreeSSERegs -= NumElts;
        return ABIArgInfo::getDirect();
      } else if (IsReturnType) {
        return ABIArgInfo::getExpand();
      } else if (!Ty->isBuiltinType() && !Ty->isVectorType()) {
        // HVAs are delayed and reclassified in the 2nd step.
        return ABIArgInfo::getIndirect(Align, /*ByVal=*/false);
      }
    }
  }

  if (Ty->isMemberPointerType()) {
    // If the member pointer is represented by an LLVM int or ptr, pass it
    // directly.
    llvm::Type *LLTy = CGT.ConvertType(Ty);
    if (LLTy->isPointerTy() || LLTy->isIntegerTy())
      return ABIArgInfo::getDirect();
  }

  if (RT || Ty->isAnyComplexType() || Ty->isMemberPointerType()) {
    // MS x64 ABI requirement: "Any argument that doesn't fit in 8 bytes, or is
    // not 1, 2, 4, or 8 bytes, must be passed by reference."
    if (Width > 64 || !llvm::isPowerOf2_64(Width))
      return getNaturalAlignIndirect(Ty, /*ByVal=*/false);

    // Otherwise, coerce it to a small integer.
    return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(), Width));
  }
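  // Worked example (ours): under the rule above, a 16-byte
  // 'struct S { char buf[16]; };' is passed by reference, while an 8-byte
  // 'struct T { int a, b; };' is coerced to i64 and passed in a GPR.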
  if (const BuiltinType *BT = Ty->getAs<BuiltinType>()) {
    switch (BT->getKind()) {
    case BuiltinType::Bool:
      // Bool is always extended by the ABI; other builtin types are not
      // extended.
      return ABIArgInfo::getExtend(Ty);

    case BuiltinType::LongDouble:
      // Mingw64 GCC uses the old 80 bit extended precision floating point
      // unit and passes long doubles indirectly through memory.
      if (IsMingw64) {
        const llvm::fltSemantics *LDF = &getTarget().getLongDoubleFormat();
        if (LDF == &llvm::APFloat::x87DoubleExtended())
          return ABIArgInfo::getIndirect(Align, /*ByVal=*/false);
      }
      break;

    case BuiltinType::Int128:
    case BuiltinType::UInt128:
      // If it's a parameter type, the normal ABI rule is that arguments larger
      // than 8 bytes are passed indirectly. GCC follows it. We follow it too,
      // even though it isn't particularly efficient.
      if (!IsReturnType)
        return ABIArgInfo::getIndirect(Align, /*ByVal=*/false);

      // Mingw64 GCC returns i128 in XMM0. Coerce to v2i64 to handle that.
      // Clang matches this for compatibility.
      return ABIArgInfo::getDirect(llvm::FixedVectorType::get(
          llvm::Type::getInt64Ty(getVMContext()), 2));

    default:
      break;
    }
  }

  if (Ty->isBitIntType()) {
    // MS x64 ABI requirement: "Any argument that doesn't fit in 8 bytes, or is
    // not 1, 2, 4, or 8 bytes, must be passed by reference."
    // However, non-power-of-two bit-precise integers will be passed as 1, 2,
    // 4, or 8 bytes anyway as long as they fit in them, so we don't have to
    // check the power of 2.
    if (Width <= 64)
      return ABIArgInfo::getDirect();
    return ABIArgInfo::getIndirect(Align, /*ByVal=*/false);
  }

  return ABIArgInfo::getDirect();
}

void WinX86_64ABIInfo::computeInfo(CGFunctionInfo &FI) const {
  const unsigned CC = FI.getCallingConvention();
  bool IsVectorCall = CC == llvm::CallingConv::X86_VectorCall;
  bool IsRegCall = CC == llvm::CallingConv::X86_RegCall;

  // If __attribute__((sysv_abi)) is in use, use the SysV argument
  // classification rules.
  if (CC == llvm::CallingConv::X86_64_SysV) {
    X86_64ABIInfo SysVABIInfo(CGT, AVXLevel);
    SysVABIInfo.computeInfo(FI);
    return;
  }

  unsigned FreeSSERegs = 0;
  if (IsVectorCall) {
    // We can use up to 4 SSE return registers with vectorcall.
    FreeSSERegs = 4;
  } else if (IsRegCall) {
    // RegCall gives us 16 SSE registers.
    FreeSSERegs = 16;
  }

  if (!getCXXABI().classifyReturnType(FI))
    FI.getReturnInfo() = classify(FI.getReturnType(), FreeSSERegs, true,
                                  IsVectorCall, IsRegCall);

  if (IsVectorCall) {
    // We can use up to 6 SSE register parameters with vectorcall.
    FreeSSERegs = 6;
  } else if (IsRegCall) {
    // RegCall gives us 16 SSE registers, we can reuse the return registers.
    FreeSSERegs = 16;
  }

  unsigned ArgNum = 0;
  unsigned ZeroSSERegs = 0;
  for (auto &I : FI.arguments()) {
    // Vectorcall in x64 only permits the first 6 arguments to be passed as
    // XMM/YMM registers. After the sixth argument, pretend no vector
    // registers are left.
    unsigned *MaybeFreeSSERegs =
        (IsVectorCall && ArgNum >= 6) ? &ZeroSSERegs : &FreeSSERegs;
    I.info =
        classify(I.type, *MaybeFreeSSERegs, false, IsVectorCall, IsRegCall);
    ++ArgNum;
  }

  if (IsVectorCall) {
    // For vectorcall, assign aggregate HVAs to any free vector registers in a
    // second pass.
    for (auto &I : FI.arguments())
      I.info = reclassifyHvaArgForVectorCall(I.type, FreeSSERegs, I.info);
  }
}

Address WinX86_64ABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
                                    QualType Ty) const {
  // MS x64 ABI requirement: "Any argument that doesn't fit in 8 bytes, or is
  // not 1, 2, 4, or 8 bytes, must be passed by reference."
  uint64_t Width = getContext().getTypeSize(Ty);
  bool IsIndirect = Width > 64 || !llvm::isPowerOf2_64(Width);

  return emitVoidPtrVAArg(CGF, VAListAddr, Ty, IsIndirect,
                          CGF.getContext().getTypeInfoInChars(Ty),
                          CharUnits::fromQuantity(8),
                          /*allowHigherAlign*/ false);
}

static bool PPC_initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
                                        llvm::Value *Address, bool Is64Bit,
                                        bool IsAIX) {
  // This is calculated from the LLVM and GCC tables and verified
  // against gcc output. AFAIK all PPC ABIs use the same encoding.

  CodeGen::CGBuilderTy &Builder = CGF.Builder;

  llvm::IntegerType *i8 = CGF.Int8Ty;
  llvm::Value *Four8 = llvm::ConstantInt::get(i8, 4);
  llvm::Value *Eight8 = llvm::ConstantInt::get(i8, 8);
  llvm::Value *Sixteen8 = llvm::ConstantInt::get(i8, 16);

  // 0-31: r0-31, the 4-byte or 8-byte general-purpose registers
  AssignToArrayRange(Builder, Address, Is64Bit ? Eight8 : Four8, 0, 31);
  // 32-63: fp0-31, the 8-byte floating-point registers
  AssignToArrayRange(Builder, Address, Eight8, 32, 63);

  // 64-67 are various 4-byte or 8-byte special-purpose registers:
  // 64: mq
  // 65: lr
  // 66: ctr
  // 67: ap
  AssignToArrayRange(Builder, Address, Is64Bit ? Eight8 : Four8, 64, 67);

  // 68-76 are various 4-byte special-purpose registers:
  // 68-75 cr0-7
  // 76: xer
  AssignToArrayRange(Builder, Address, Four8, 68, 76);

  // 77-108: v0-31, the 16-byte vector registers
  AssignToArrayRange(Builder, Address, Sixteen8, 77, 108);

  // 109: vrsave
  // 110: vscr
  AssignToArrayRange(Builder, Address, Is64Bit ? Eight8 : Four8, 109, 110);

  // AIX does not utilize the rest of the registers.
  if (IsAIX)
    return false;

  // 111: spe_acc
  // 112: spefscr
  // 113: sfp
  AssignToArrayRange(Builder, Address, Is64Bit ? Eight8 : Four8, 111, 113);

  if (!Is64Bit)
    return false;

  // TODO: Need to verify whether these registers are used on 64-bit AIX with
  // Power8 or later CPUs.
  // 64-bit only registers:
  // 114: tfhar
  // 115: tfiar
  // 116: texasr
  AssignToArrayRange(Builder, Address, Eight8, 114, 116);

  return false;
}

// AIX
namespace {
/// AIXABIInfo - The AIX XCOFF ABI information.
class AIXABIInfo : public ABIInfo {
  const bool Is64Bit;
  const unsigned PtrByteSize;
  CharUnits getParamTypeAlignment(QualType Ty) const;

public:
  AIXABIInfo(CodeGen::CodeGenTypes &CGT, bool Is64Bit)
      : ABIInfo(CGT), Is64Bit(Is64Bit), PtrByteSize(Is64Bit ? 8 : 4) {}

  bool isPromotableTypeForABI(QualType Ty) const;

  ABIArgInfo classifyReturnType(QualType RetTy) const;
  ABIArgInfo classifyArgumentType(QualType Ty) const;

  void computeInfo(CGFunctionInfo &FI) const override {
    if (!getCXXABI().classifyReturnType(FI))
      FI.getReturnInfo() = classifyReturnType(FI.getReturnType());

    for (auto &I : FI.arguments())
      I.info = classifyArgumentType(I.type);
  }

  Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
                    QualType Ty) const override;
};

class AIXTargetCodeGenInfo : public TargetCodeGenInfo {
  const bool Is64Bit;

public:
  AIXTargetCodeGenInfo(CodeGen::CodeGenTypes &CGT, bool Is64Bit)
      : TargetCodeGenInfo(std::make_unique<AIXABIInfo>(CGT, Is64Bit)),
        Is64Bit(Is64Bit) {}
  int getDwarfEHStackPointer(CodeGen::CodeGenModule &M) const override {
    return 1; // r1 is the dedicated stack pointer
  }

  bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
                               llvm::Value *Address) const override;
};
} // namespace
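// Illustrative note (ours): on 64-bit AIX an 'unsigned int' argument is
// zero-extended to 64 bits even though it is already a "promoted" type in C;
// this is what the Int/UInt cases in isPromotableTypeForABI below implement.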
4602 if (const BuiltinType *BT = Ty->getAs<BuiltinType>()) 4603 switch (BT->getKind()) { 4604 case BuiltinType::Int: 4605 case BuiltinType::UInt: 4606 return true; 4607 default: 4608 break; 4609 } 4610 4611 return false; 4612 } 4613 4614 ABIArgInfo AIXABIInfo::classifyReturnType(QualType RetTy) const { 4615 if (RetTy->isAnyComplexType()) 4616 return ABIArgInfo::getDirect(); 4617 4618 if (RetTy->isVectorType()) 4619 return ABIArgInfo::getDirect(); 4620 4621 if (RetTy->isVoidType()) 4622 return ABIArgInfo::getIgnore(); 4623 4624 if (isAggregateTypeForABI(RetTy)) 4625 return getNaturalAlignIndirect(RetTy); 4626 4627 return (isPromotableTypeForABI(RetTy) ? ABIArgInfo::getExtend(RetTy) 4628 : ABIArgInfo::getDirect()); 4629 } 4630 4631 ABIArgInfo AIXABIInfo::classifyArgumentType(QualType Ty) const { 4632 Ty = useFirstFieldIfTransparentUnion(Ty); 4633 4634 if (Ty->isAnyComplexType()) 4635 return ABIArgInfo::getDirect(); 4636 4637 if (Ty->isVectorType()) 4638 return ABIArgInfo::getDirect(); 4639 4640 if (isAggregateTypeForABI(Ty)) { 4641 // Records with non-trivial destructors/copy-constructors should not be 4642 // passed by value. 4643 if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, getCXXABI())) 4644 return getNaturalAlignIndirect(Ty, RAA == CGCXXABI::RAA_DirectInMemory); 4645 4646 CharUnits CCAlign = getParamTypeAlignment(Ty); 4647 CharUnits TyAlign = getContext().getTypeAlignInChars(Ty); 4648 4649 return ABIArgInfo::getIndirect(CCAlign, /*ByVal*/ true, 4650 /*Realign*/ TyAlign > CCAlign); 4651 } 4652 4653 return (isPromotableTypeForABI(Ty) ? ABIArgInfo::getExtend(Ty) 4654 : ABIArgInfo::getDirect()); 4655 } 4656 4657 CharUnits AIXABIInfo::getParamTypeAlignment(QualType Ty) const { 4658 // Complex types are passed just like their elements. 4659 if (const ComplexType *CTy = Ty->getAs<ComplexType>()) 4660 Ty = CTy->getElementType(); 4661 4662 if (Ty->isVectorType()) 4663 return CharUnits::fromQuantity(16); 4664 4665 // If the structure contains a vector type, the alignment is 16. 4666 if (isRecordWithSIMDVectorType(getContext(), Ty)) 4667 return CharUnits::fromQuantity(16); 4668 4669 return CharUnits::fromQuantity(PtrByteSize); 4670 } 4671 4672 Address AIXABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr, 4673 QualType Ty) const { 4674 4675 auto TypeInfo = getContext().getTypeInfoInChars(Ty); 4676 TypeInfo.Align = getParamTypeAlignment(Ty); 4677 4678 CharUnits SlotSize = CharUnits::fromQuantity(PtrByteSize); 4679 4680 // If we have a complex type and the base type is smaller than the register 4681 // size, the ABI calls for the real and imaginary parts to be right-adjusted 4682 // in separate words in 32bit mode or doublewords in 64bit mode. However, 4683 // Clang expects us to produce a pointer to a structure with the two parts 4684 // packed tightly. So generate loads of the real and imaginary parts relative 4685 // to the va_list pointer, and store them to a temporary structure. We do the 4686 // same as the PPC64ABI here. 
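// A hedged sketch of the layout this handles, in 64-bit mode:
//
//   _Complex float c;  // each 4-byte part right-adjusted in its own
//                      // 8-byte doubleword of the va_list area
//
// The complexTempStructure() helper used below loads both parts and stores
// them into a tightly packed { float, float } temporary.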
4687 if (const ComplexType *CTy = Ty->getAs<ComplexType>()) { 4688 CharUnits EltSize = TypeInfo.Width / 2; 4689 if (EltSize < SlotSize) 4690 return complexTempStructure(CGF, VAListAddr, Ty, SlotSize, EltSize, CTy); 4691 } 4692 4693 return emitVoidPtrVAArg(CGF, VAListAddr, Ty, /*Indirect*/ false, TypeInfo, 4694 SlotSize, /*AllowHigher*/ true); 4695 } 4696 4697 bool AIXTargetCodeGenInfo::initDwarfEHRegSizeTable( 4698 CodeGen::CodeGenFunction &CGF, llvm::Value *Address) const { 4699 return PPC_initDwarfEHRegSizeTable(CGF, Address, Is64Bit, /*IsAIX*/ true); 4700 } 4701 4702 // PowerPC-32 4703 namespace { 4704 /// PPC32_SVR4_ABIInfo - The 32-bit PowerPC ELF (SVR4) ABI information. 4705 class PPC32_SVR4_ABIInfo : public DefaultABIInfo { 4706 bool IsSoftFloatABI; 4707 bool IsRetSmallStructInRegABI; 4708 4709 CharUnits getParamTypeAlignment(QualType Ty) const; 4710 4711 public: 4712 PPC32_SVR4_ABIInfo(CodeGen::CodeGenTypes &CGT, bool SoftFloatABI, 4713 bool RetSmallStructInRegABI) 4714 : DefaultABIInfo(CGT), IsSoftFloatABI(SoftFloatABI), 4715 IsRetSmallStructInRegABI(RetSmallStructInRegABI) {} 4716 4717 ABIArgInfo classifyReturnType(QualType RetTy) const; 4718 4719 void computeInfo(CGFunctionInfo &FI) const override { 4720 if (!getCXXABI().classifyReturnType(FI)) 4721 FI.getReturnInfo() = classifyReturnType(FI.getReturnType()); 4722 for (auto &I : FI.arguments()) 4723 I.info = classifyArgumentType(I.type); 4724 } 4725 4726 Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr, 4727 QualType Ty) const override; 4728 }; 4729 4730 class PPC32TargetCodeGenInfo : public TargetCodeGenInfo { 4731 public: 4732 PPC32TargetCodeGenInfo(CodeGenTypes &CGT, bool SoftFloatABI, 4733 bool RetSmallStructInRegABI) 4734 : TargetCodeGenInfo(std::make_unique<PPC32_SVR4_ABIInfo>( 4735 CGT, SoftFloatABI, RetSmallStructInRegABI)) {} 4736 4737 static bool isStructReturnInRegABI(const llvm::Triple &Triple, 4738 const CodeGenOptions &Opts); 4739 4740 int getDwarfEHStackPointer(CodeGen::CodeGenModule &M) const override { 4741 // This is recovered from gcc output. 4742 return 1; // r1 is the dedicated stack pointer 4743 } 4744 4745 bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF, 4746 llvm::Value *Address) const override; 4747 }; 4748 } 4749 4750 CharUnits PPC32_SVR4_ABIInfo::getParamTypeAlignment(QualType Ty) const { 4751 // Complex types are passed just like their elements. 4752 if (const ComplexType *CTy = Ty->getAs<ComplexType>()) 4753 Ty = CTy->getElementType(); 4754 4755 if (Ty->isVectorType()) 4756 return CharUnits::fromQuantity(getContext().getTypeSize(Ty) == 128 ? 16 4757 : 4); 4758 4759 // For single-element float/vector structs, we consider the whole type 4760 // to have the same alignment requirements as its single element. 4761 const Type *AlignTy = nullptr; 4762 if (const Type *EltType = isSingleElementStruct(Ty, getContext())) { 4763 const BuiltinType *BT = EltType->getAs<BuiltinType>(); 4764 if ((EltType->isVectorType() && getContext().getTypeSize(EltType) == 128) || 4765 (BT && BT->isFloatingPoint())) 4766 AlignTy = EltType; 4767 } 4768 4769 if (AlignTy) 4770 return CharUnits::fromQuantity(AlignTy->isVectorType() ? 16 : 4); 4771 return CharUnits::fromQuantity(4); 4772 } 4773 4774 ABIArgInfo PPC32_SVR4_ABIInfo::classifyReturnType(QualType RetTy) const { 4775 uint64_t Size; 4776 4777 // -msvr4-struct-return puts small aggregates in GPR3 and GPR4. 
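// For example (illustrative struct only): with -msvr4-struct-return,
//
//   struct S { short a, b, c; };  // 48 bits
//
// is coerced to i48 below; LLVM widens it and returns it in r3:r4.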
4778 if (isAggregateTypeForABI(RetTy) && IsRetSmallStructInRegABI && 4779 (Size = getContext().getTypeSize(RetTy)) <= 64) { 4780 // System V ABI (1995), page 3-22, specified: 4781 // > A structure or union whose size is less than or equal to 8 bytes 4782 // > shall be returned in r3 and r4, as if it were first stored in the 4783 // > 8-byte aligned memory area and then the low addressed word were 4784 // > loaded into r3 and the high-addressed word into r4. Bits beyond 4785 // > the last member of the structure or union are not defined. 4786 // 4787 // GCC for big-endian PPC32 inserts the pad before the first member, 4788 // not "beyond the last member" of the struct. To stay compatible 4789 // with GCC, we coerce the struct to an integer of the same size. 4790 // LLVM will extend it and return i32 in r3, or i64 in r3:r4. 4791 if (Size == 0) 4792 return ABIArgInfo::getIgnore(); 4793 else { 4794 llvm::Type *CoerceTy = llvm::Type::getIntNTy(getVMContext(), Size); 4795 return ABIArgInfo::getDirect(CoerceTy); 4796 } 4797 } 4798 4799 return DefaultABIInfo::classifyReturnType(RetTy); 4800 } 4801 4802 // TODO: this implementation is now likely redundant with 4803 // DefaultABIInfo::EmitVAArg. 4804 Address PPC32_SVR4_ABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAList, 4805 QualType Ty) const { 4806 if (getTarget().getTriple().isOSDarwin()) { 4807 auto TI = getContext().getTypeInfoInChars(Ty); 4808 TI.Align = getParamTypeAlignment(Ty); 4809 4810 CharUnits SlotSize = CharUnits::fromQuantity(4); 4811 return emitVoidPtrVAArg(CGF, VAList, Ty, 4812 classifyArgumentType(Ty).isIndirect(), TI, SlotSize, 4813 /*AllowHigherAlign=*/true); 4814 } 4815 4816 const unsigned OverflowLimit = 8; 4817 if (const ComplexType *CTy = Ty->getAs<ComplexType>()) { 4818 // TODO: Implement this. For now ignore. 4819 (void)CTy; 4820 return Address::invalid(); // FIXME? 4821 } 4822 4823 // struct __va_list_tag { 4824 // unsigned char gpr; 4825 // unsigned char fpr; 4826 // unsigned short reserved; 4827 // void *overflow_arg_area; 4828 // void *reg_save_area; 4829 // }; 4830 4831 bool isI64 = Ty->isIntegerType() && getContext().getTypeSize(Ty) == 64; 4832 bool isInt = !Ty->isFloatingType(); 4833 bool isF64 = Ty->isFloatingType() && getContext().getTypeSize(Ty) == 64; 4834 4835 // All aggregates are passed indirectly? That doesn't seem consistent 4836 // with the argument-lowering code. 4837 bool isIndirect = isAggregateTypeForABI(Ty); 4838 4839 CGBuilderTy &Builder = CGF.Builder; 4840 4841 // The calling convention either uses 1-2 GPRs or 1 FPR. 4842 Address NumRegsAddr = Address::invalid(); 4843 if (isInt || IsSoftFloatABI) { 4844 NumRegsAddr = Builder.CreateStructGEP(VAList, 0, "gpr"); 4845 } else { 4846 NumRegsAddr = Builder.CreateStructGEP(VAList, 1, "fpr"); 4847 } 4848 4849 llvm::Value *NumRegs = Builder.CreateLoad(NumRegsAddr, "numUsedRegs"); 4850 4851 // "Align" the register count when TY is i64. 
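// A doubleword consumes an aligned GPR pair, so the used-register count is
// first rounded up to an even value; e.g. a count of 3 becomes 4. A C sketch
// of the IR built below (illustration only):
//
//   numRegs = (numRegs + 1) & ~1u;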
4852   if (isI64 || (isF64 && IsSoftFloatABI)) {
4853     NumRegs = Builder.CreateAdd(NumRegs, Builder.getInt8(1));
4854     NumRegs = Builder.CreateAnd(NumRegs, Builder.getInt8((uint8_t) ~1U));
4855   }
4856
4857   llvm::Value *CC =
4858       Builder.CreateICmpULT(NumRegs, Builder.getInt8(OverflowLimit), "cond");
4859
4860   llvm::BasicBlock *UsingRegs = CGF.createBasicBlock("using_regs");
4861   llvm::BasicBlock *UsingOverflow = CGF.createBasicBlock("using_overflow");
4862   llvm::BasicBlock *Cont = CGF.createBasicBlock("cont");
4863
4864   Builder.CreateCondBr(CC, UsingRegs, UsingOverflow);
4865
4866   llvm::Type *DirectTy = CGF.ConvertType(Ty), *ElementTy = DirectTy;
4867   if (isIndirect) DirectTy = DirectTy->getPointerTo(0);
4868
4869   // Case 1: consume registers.
4870   Address RegAddr = Address::invalid();
4871   {
4872     CGF.EmitBlock(UsingRegs);
4873
4874     Address RegSaveAreaPtr = Builder.CreateStructGEP(VAList, 4);
4875     RegAddr = Address(Builder.CreateLoad(RegSaveAreaPtr), CGF.Int8Ty,
4876                       CharUnits::fromQuantity(8));
4877     assert(RegAddr.getElementType() == CGF.Int8Ty);
4878
4879     // Floating-point registers start after the general-purpose registers.
4880     if (!(isInt || IsSoftFloatABI)) {
4881       RegAddr = Builder.CreateConstInBoundsByteGEP(RegAddr,
4882                                                    CharUnits::fromQuantity(32));
4883     }
4884
4885     // Get the address of the saved value by scaling the number of
4886     // registers we've used by the size of each register.
4887     CharUnits RegSize = CharUnits::fromQuantity((isInt || IsSoftFloatABI) ? 4 : 8);
4888     llvm::Value *RegOffset =
4889         Builder.CreateMul(NumRegs, Builder.getInt8(RegSize.getQuantity()));
4890     RegAddr = Address(
4891         Builder.CreateInBoundsGEP(CGF.Int8Ty, RegAddr.getPointer(), RegOffset),
4892         CGF.Int8Ty, RegAddr.getAlignment().alignmentOfArrayElement(RegSize));
4893     RegAddr = Builder.CreateElementBitCast(RegAddr, DirectTy);
4894
4895     // Increase the used-register count.
4896     NumRegs =
4897       Builder.CreateAdd(NumRegs,
4898                         Builder.getInt8((isI64 || (isF64 && IsSoftFloatABI)) ? 2 : 1));
4899     Builder.CreateStore(NumRegs, NumRegsAddr);
4900
4901     CGF.EmitBranch(Cont);
4902   }
4903
4904   // Case 2: consume space in the overflow area.
4905   Address MemAddr = Address::invalid();
4906   {
4907     CGF.EmitBlock(UsingOverflow);
4908
4909     Builder.CreateStore(Builder.getInt8(OverflowLimit), NumRegsAddr);
4910
4911     // Everything in the overflow area is rounded up to a size of at least 4.
4912     CharUnits OverflowAreaAlign = CharUnits::fromQuantity(4);
4913
4914     CharUnits Size;
4915     if (!isIndirect) {
4916       auto TypeInfo = CGF.getContext().getTypeInfoInChars(Ty);
4917       Size = TypeInfo.Width.alignTo(OverflowAreaAlign);
4918     } else {
4919       Size = CGF.getPointerSize();
4920     }
4921
4922     Address OverflowAreaAddr = Builder.CreateStructGEP(VAList, 3);
4923     Address OverflowArea =
4924         Address(Builder.CreateLoad(OverflowAreaAddr, "argp.cur"), CGF.Int8Ty,
4925                 OverflowAreaAlign);
4926     // Round up the address of the argument to its required alignment.
4927     CharUnits Align = CGF.getContext().getTypeAlignInChars(Ty);
4928     if (Align > OverflowAreaAlign) {
4929       llvm::Value *Ptr = OverflowArea.getPointer();
4930       OverflowArea = Address(emitRoundPointerUpToAlignment(CGF, Ptr, Align),
4931                              OverflowArea.getElementType(), Align);
4932     }
4933
4934     MemAddr = Builder.CreateElementBitCast(OverflowArea, DirectTy);
4935
4936     // Increase the overflow area.
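    // (This advances overflow_arg_area by the rounded-up Size computed above,
    // so the next va_arg call on this va_list sees fresh space.)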
4937 OverflowArea = Builder.CreateConstInBoundsByteGEP(OverflowArea, Size); 4938 Builder.CreateStore(OverflowArea.getPointer(), OverflowAreaAddr); 4939 CGF.EmitBranch(Cont); 4940 } 4941 4942 CGF.EmitBlock(Cont); 4943 4944 // Merge the cases with a phi. 4945 Address Result = emitMergePHI(CGF, RegAddr, UsingRegs, MemAddr, UsingOverflow, 4946 "vaarg.addr"); 4947 4948 // Load the pointer if the argument was passed indirectly. 4949 if (isIndirect) { 4950 Result = Address(Builder.CreateLoad(Result, "aggr"), ElementTy, 4951 getContext().getTypeAlignInChars(Ty)); 4952 } 4953 4954 return Result; 4955 } 4956 4957 bool PPC32TargetCodeGenInfo::isStructReturnInRegABI( 4958 const llvm::Triple &Triple, const CodeGenOptions &Opts) { 4959 assert(Triple.isPPC32()); 4960 4961 switch (Opts.getStructReturnConvention()) { 4962 case CodeGenOptions::SRCK_Default: 4963 break; 4964 case CodeGenOptions::SRCK_OnStack: // -maix-struct-return 4965 return false; 4966 case CodeGenOptions::SRCK_InRegs: // -msvr4-struct-return 4967 return true; 4968 } 4969 4970 if (Triple.isOSBinFormatELF() && !Triple.isOSLinux()) 4971 return true; 4972 4973 return false; 4974 } 4975 4976 bool 4977 PPC32TargetCodeGenInfo::initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF, 4978 llvm::Value *Address) const { 4979 return PPC_initDwarfEHRegSizeTable(CGF, Address, /*Is64Bit*/ false, 4980 /*IsAIX*/ false); 4981 } 4982 4983 // PowerPC-64 4984 4985 namespace { 4986 /// PPC64_SVR4_ABIInfo - The 64-bit PowerPC ELF (SVR4) ABI information. 4987 class PPC64_SVR4_ABIInfo : public SwiftABIInfo { 4988 public: 4989 enum ABIKind { 4990 ELFv1 = 0, 4991 ELFv2 4992 }; 4993 4994 private: 4995 static const unsigned GPRBits = 64; 4996 ABIKind Kind; 4997 bool IsSoftFloatABI; 4998 4999 public: 5000 PPC64_SVR4_ABIInfo(CodeGen::CodeGenTypes &CGT, ABIKind Kind, 5001 bool SoftFloatABI) 5002 : SwiftABIInfo(CGT), Kind(Kind), IsSoftFloatABI(SoftFloatABI) {} 5003 5004 bool isPromotableTypeForABI(QualType Ty) const; 5005 CharUnits getParamTypeAlignment(QualType Ty) const; 5006 5007 ABIArgInfo classifyReturnType(QualType RetTy) const; 5008 ABIArgInfo classifyArgumentType(QualType Ty) const; 5009 5010 bool isHomogeneousAggregateBaseType(QualType Ty) const override; 5011 bool isHomogeneousAggregateSmallEnough(const Type *Ty, 5012 uint64_t Members) const override; 5013 5014 // TODO: We can add more logic to computeInfo to improve performance. 5015 // Example: For aggregate arguments that fit in a register, we could 5016 // use getDirectInReg (as is done below for structs containing a single 5017 // floating-point value) to avoid pushing them to memory on function 5018 // entry. This would require changing the logic in PPCISelLowering 5019 // when lowering the parameters in the caller and args in the callee. 5020 void computeInfo(CGFunctionInfo &FI) const override { 5021 if (!getCXXABI().classifyReturnType(FI)) 5022 FI.getReturnInfo() = classifyReturnType(FI.getReturnType()); 5023 for (auto &I : FI.arguments()) { 5024 // We rely on the default argument classification for the most part. 5025 // One exception: An aggregate containing a single floating-point 5026 // or vector item must be passed in a register if one is available. 
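// Illustrative example of the exception: 'struct { double d; };' is given
// getDirectInReg treatment as a plain double, while 'struct { double d; int i; };'
// is not a single-element struct and falls through to classifyArgumentType().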
5027       const Type *T = isSingleElementStruct(I.type, getContext());
5028       if (T) {
5029         const BuiltinType *BT = T->getAs<BuiltinType>();
5030         if ((T->isVectorType() && getContext().getTypeSize(T) == 128) ||
5031             (BT && BT->isFloatingPoint())) {
5032           QualType QT(T, 0);
5033           I.info = ABIArgInfo::getDirectInReg(CGT.ConvertType(QT));
5034           continue;
5035         }
5036       }
5037       I.info = classifyArgumentType(I.type);
5038     }
5039   }
5040
5041   Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
5042                     QualType Ty) const override;
5043
5044   bool shouldPassIndirectlyForSwift(ArrayRef<llvm::Type*> scalars,
5045                                     bool asReturnValue) const override {
5046     return occupiesMoreThan(CGT, scalars, /*total*/ 4);
5047   }
5048
5049   bool isSwiftErrorInRegister() const override {
5050     return false;
5051   }
5052 };
5053
5054 class PPC64_SVR4_TargetCodeGenInfo : public TargetCodeGenInfo {
5055
5056 public:
5057   PPC64_SVR4_TargetCodeGenInfo(CodeGenTypes &CGT,
5058                                PPC64_SVR4_ABIInfo::ABIKind Kind,
5059                                bool SoftFloatABI)
5060       : TargetCodeGenInfo(
5061             std::make_unique<PPC64_SVR4_ABIInfo>(CGT, Kind, SoftFloatABI)) {}
5062
5063   int getDwarfEHStackPointer(CodeGen::CodeGenModule &M) const override {
5064     // This is recovered from gcc output.
5065     return 1; // r1 is the dedicated stack pointer
5066   }
5067
5068   bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
5069                                llvm::Value *Address) const override;
5070 };
5071
5072 class PPC64TargetCodeGenInfo : public DefaultTargetCodeGenInfo {
5073 public:
5074   PPC64TargetCodeGenInfo(CodeGenTypes &CGT) : DefaultTargetCodeGenInfo(CGT) {}
5075
5076   int getDwarfEHStackPointer(CodeGen::CodeGenModule &M) const override {
5077     // This is recovered from gcc output.
5078     return 1; // r1 is the dedicated stack pointer
5079   }
5080
5081   bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
5082                                llvm::Value *Address) const override;
5083 };
5084
5085 }
5086
5087 // Return true if the ABI requires Ty to be passed sign- or zero-
5088 // extended to 64 bits.
5089 bool
5090 PPC64_SVR4_ABIInfo::isPromotableTypeForABI(QualType Ty) const {
5091   // Treat an enum type as its underlying type.
5092   if (const EnumType *EnumTy = Ty->getAs<EnumType>())
5093     Ty = EnumTy->getDecl()->getIntegerType();
5094
5095   // Promotable integer types are required to be promoted by the ABI.
5096   if (isPromotableIntegerTypeForABI(Ty))
5097     return true;
5098
5099   // In addition to the usual promotable integer types, we also need to
5100   // extend all 32-bit types, since the ABI requires promotion to 64 bits.
5101   if (const BuiltinType *BT = Ty->getAs<BuiltinType>())
5102     switch (BT->getKind()) {
5103     case BuiltinType::Int:
5104     case BuiltinType::UInt:
5105       return true;
5106     default:
5107       break;
5108     }
5109
5110   if (const auto *EIT = Ty->getAs<BitIntType>())
5111     if (EIT->getNumBits() < 64)
5112       return true;
5113
5114   return false;
5115 }
5116
5117 /// getParamTypeAlignment - Determine whether a type requires 16-byte or
5118 /// higher alignment in the parameter area. Always returns at least 8.
5119 CharUnits PPC64_SVR4_ABIInfo::getParamTypeAlignment(QualType Ty) const {
5120   // Complex types are passed just like their elements.
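  // (So '_Complex double' is aligned like plain 'double', i.e. 8 bytes, while
  // an IEEE binary128 value such as '__float128' is quadword-aligned below.)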
5121 if (const ComplexType *CTy = Ty->getAs<ComplexType>()) 5122 Ty = CTy->getElementType(); 5123 5124 auto FloatUsesVector = [this](QualType Ty){ 5125 return Ty->isRealFloatingType() && &getContext().getFloatTypeSemantics( 5126 Ty) == &llvm::APFloat::IEEEquad(); 5127 }; 5128 5129 // Only vector types of size 16 bytes need alignment (larger types are 5130 // passed via reference, smaller types are not aligned). 5131 if (Ty->isVectorType()) { 5132 return CharUnits::fromQuantity(getContext().getTypeSize(Ty) == 128 ? 16 : 8); 5133 } else if (FloatUsesVector(Ty)) { 5134 // According to ABI document section 'Optional Save Areas': If extended 5135 // precision floating-point values in IEEE BINARY 128 QUADRUPLE PRECISION 5136 // format are supported, map them to a single quadword, quadword aligned. 5137 return CharUnits::fromQuantity(16); 5138 } 5139 5140 // For single-element float/vector structs, we consider the whole type 5141 // to have the same alignment requirements as its single element. 5142 const Type *AlignAsType = nullptr; 5143 const Type *EltType = isSingleElementStruct(Ty, getContext()); 5144 if (EltType) { 5145 const BuiltinType *BT = EltType->getAs<BuiltinType>(); 5146 if ((EltType->isVectorType() && getContext().getTypeSize(EltType) == 128) || 5147 (BT && BT->isFloatingPoint())) 5148 AlignAsType = EltType; 5149 } 5150 5151 // Likewise for ELFv2 homogeneous aggregates. 5152 const Type *Base = nullptr; 5153 uint64_t Members = 0; 5154 if (!AlignAsType && Kind == ELFv2 && 5155 isAggregateTypeForABI(Ty) && isHomogeneousAggregate(Ty, Base, Members)) 5156 AlignAsType = Base; 5157 5158 // With special case aggregates, only vector base types need alignment. 5159 if (AlignAsType) { 5160 bool UsesVector = AlignAsType->isVectorType() || 5161 FloatUsesVector(QualType(AlignAsType, 0)); 5162 return CharUnits::fromQuantity(UsesVector ? 16 : 8); 5163 } 5164 5165 // Otherwise, we only need alignment for any aggregate type that 5166 // has an alignment requirement of >= 16 bytes. 5167 if (isAggregateTypeForABI(Ty) && getContext().getTypeAlign(Ty) >= 128) { 5168 return CharUnits::fromQuantity(16); 5169 } 5170 5171 return CharUnits::fromQuantity(8); 5172 } 5173 5174 /// isHomogeneousAggregate - Return true if a type is an ELFv2 homogeneous 5175 /// aggregate. Base is set to the base element type, and Members is set 5176 /// to the number of base elements. 5177 bool ABIInfo::isHomogeneousAggregate(QualType Ty, const Type *&Base, 5178 uint64_t &Members) const { 5179 if (const ConstantArrayType *AT = getContext().getAsConstantArrayType(Ty)) { 5180 uint64_t NElements = AT->getSize().getZExtValue(); 5181 if (NElements == 0) 5182 return false; 5183 if (!isHomogeneousAggregate(AT->getElementType(), Base, Members)) 5184 return false; 5185 Members *= NElements; 5186 } else if (const RecordType *RT = Ty->getAs<RecordType>()) { 5187 const RecordDecl *RD = RT->getDecl(); 5188 if (RD->hasFlexibleArrayMember()) 5189 return false; 5190 5191 Members = 0; 5192 5193 // If this is a C++ record, check the properties of the record such as 5194 // bases and ABI specific restrictions 5195 if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD)) { 5196 if (!getCXXABI().isPermittedToBeHomogeneousAggregate(CXXRD)) 5197 return false; 5198 5199 for (const auto &I : CXXRD->bases()) { 5200 // Ignore empty records. 
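      // (e.g. an empty base class 'struct Tag {};' contributes no members and
      // does not break homogeneity; an illustrative case, not from the ABI
      // text.)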
5201 if (isEmptyRecord(getContext(), I.getType(), true)) 5202 continue; 5203 5204 uint64_t FldMembers; 5205 if (!isHomogeneousAggregate(I.getType(), Base, FldMembers)) 5206 return false; 5207 5208 Members += FldMembers; 5209 } 5210 } 5211 5212 for (const auto *FD : RD->fields()) { 5213 // Ignore (non-zero arrays of) empty records. 5214 QualType FT = FD->getType(); 5215 while (const ConstantArrayType *AT = 5216 getContext().getAsConstantArrayType(FT)) { 5217 if (AT->getSize().getZExtValue() == 0) 5218 return false; 5219 FT = AT->getElementType(); 5220 } 5221 if (isEmptyRecord(getContext(), FT, true)) 5222 continue; 5223 5224 if (isZeroLengthBitfieldPermittedInHomogeneousAggregate() && 5225 FD->isZeroLengthBitField(getContext())) 5226 continue; 5227 5228 uint64_t FldMembers; 5229 if (!isHomogeneousAggregate(FD->getType(), Base, FldMembers)) 5230 return false; 5231 5232 Members = (RD->isUnion() ? 5233 std::max(Members, FldMembers) : Members + FldMembers); 5234 } 5235 5236 if (!Base) 5237 return false; 5238 5239 // Ensure there is no padding. 5240 if (getContext().getTypeSize(Base) * Members != 5241 getContext().getTypeSize(Ty)) 5242 return false; 5243 } else { 5244 Members = 1; 5245 if (const ComplexType *CT = Ty->getAs<ComplexType>()) { 5246 Members = 2; 5247 Ty = CT->getElementType(); 5248 } 5249 5250 // Most ABIs only support float, double, and some vector type widths. 5251 if (!isHomogeneousAggregateBaseType(Ty)) 5252 return false; 5253 5254 // The base type must be the same for all members. Types that 5255 // agree in both total size and mode (float vs. vector) are 5256 // treated as being equivalent here. 5257 const Type *TyPtr = Ty.getTypePtr(); 5258 if (!Base) { 5259 Base = TyPtr; 5260 // If it's a non-power-of-2 vector, its size is already a power-of-2, 5261 // so make sure to widen it explicitly. 5262 if (const VectorType *VT = Base->getAs<VectorType>()) { 5263 QualType EltTy = VT->getElementType(); 5264 unsigned NumElements = 5265 getContext().getTypeSize(VT) / getContext().getTypeSize(EltTy); 5266 Base = getContext() 5267 .getVectorType(EltTy, NumElements, VT->getVectorKind()) 5268 .getTypePtr(); 5269 } 5270 } 5271 5272 if (Base->isVectorType() != TyPtr->isVectorType() || 5273 getContext().getTypeSize(Base) != getContext().getTypeSize(TyPtr)) 5274 return false; 5275 } 5276 return Members > 0 && isHomogeneousAggregateSmallEnough(Base, Members); 5277 } 5278 5279 bool PPC64_SVR4_ABIInfo::isHomogeneousAggregateBaseType(QualType Ty) const { 5280 // Homogeneous aggregates for ELFv2 must have base types of float, 5281 // double, long double, or 128-bit vectors. 5282 if (const BuiltinType *BT = Ty->getAs<BuiltinType>()) { 5283 if (BT->getKind() == BuiltinType::Float || 5284 BT->getKind() == BuiltinType::Double || 5285 BT->getKind() == BuiltinType::LongDouble || 5286 BT->getKind() == BuiltinType::Ibm128 || 5287 (getContext().getTargetInfo().hasFloat128Type() && 5288 (BT->getKind() == BuiltinType::Float128))) { 5289 if (IsSoftFloatABI) 5290 return false; 5291 return true; 5292 } 5293 } 5294 if (const VectorType *VT = Ty->getAs<VectorType>()) { 5295 if (getContext().getTypeSize(VT) == 128) 5296 return true; 5297 } 5298 return false; 5299 } 5300 5301 bool PPC64_SVR4_ABIInfo::isHomogeneousAggregateSmallEnough( 5302 const Type *Base, uint64_t Members) const { 5303 // Vector and fp128 types require one register, other floating point types 5304 // require one or two registers depending on their size. 
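// Worked example (illustrative): an ELFv2 homogeneous aggregate of four
// 128-bit 'long double' (IBM double-double) members needs 4 * 2 = 8 FPRs,
// exactly the limit below; a fifth member would disqualify it.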
5305 uint32_t NumRegs = 5306 ((getContext().getTargetInfo().hasFloat128Type() && 5307 Base->isFloat128Type()) || 5308 Base->isVectorType()) ? 1 5309 : (getContext().getTypeSize(Base) + 63) / 64; 5310 5311 // Homogeneous Aggregates may occupy at most 8 registers. 5312 return Members * NumRegs <= 8; 5313 } 5314 5315 ABIArgInfo 5316 PPC64_SVR4_ABIInfo::classifyArgumentType(QualType Ty) const { 5317 Ty = useFirstFieldIfTransparentUnion(Ty); 5318 5319 if (Ty->isAnyComplexType()) 5320 return ABIArgInfo::getDirect(); 5321 5322 // Non-Altivec vector types are passed in GPRs (smaller than 16 bytes) 5323 // or via reference (larger than 16 bytes). 5324 if (Ty->isVectorType()) { 5325 uint64_t Size = getContext().getTypeSize(Ty); 5326 if (Size > 128) 5327 return getNaturalAlignIndirect(Ty, /*ByVal=*/false); 5328 else if (Size < 128) { 5329 llvm::Type *CoerceTy = llvm::IntegerType::get(getVMContext(), Size); 5330 return ABIArgInfo::getDirect(CoerceTy); 5331 } 5332 } 5333 5334 if (const auto *EIT = Ty->getAs<BitIntType>()) 5335 if (EIT->getNumBits() > 128) 5336 return getNaturalAlignIndirect(Ty, /*ByVal=*/true); 5337 5338 if (isAggregateTypeForABI(Ty)) { 5339 if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, getCXXABI())) 5340 return getNaturalAlignIndirect(Ty, RAA == CGCXXABI::RAA_DirectInMemory); 5341 5342 uint64_t ABIAlign = getParamTypeAlignment(Ty).getQuantity(); 5343 uint64_t TyAlign = getContext().getTypeAlignInChars(Ty).getQuantity(); 5344 5345 // ELFv2 homogeneous aggregates are passed as array types. 5346 const Type *Base = nullptr; 5347 uint64_t Members = 0; 5348 if (Kind == ELFv2 && 5349 isHomogeneousAggregate(Ty, Base, Members)) { 5350 llvm::Type *BaseTy = CGT.ConvertType(QualType(Base, 0)); 5351 llvm::Type *CoerceTy = llvm::ArrayType::get(BaseTy, Members); 5352 return ABIArgInfo::getDirect(CoerceTy); 5353 } 5354 5355 // If an aggregate may end up fully in registers, we do not 5356 // use the ByVal method, but pass the aggregate as array. 5357 // This is usually beneficial since we avoid forcing the 5358 // back-end to store the argument to memory. 5359 uint64_t Bits = getContext().getTypeSize(Ty); 5360 if (Bits > 0 && Bits <= 8 * GPRBits) { 5361 llvm::Type *CoerceTy; 5362 5363 // Types up to 8 bytes are passed as integer type (which will be 5364 // properly aligned in the argument save area doubleword). 5365 if (Bits <= GPRBits) 5366 CoerceTy = 5367 llvm::IntegerType::get(getVMContext(), llvm::alignTo(Bits, 8)); 5368 // Larger types are passed as arrays, with the base type selected 5369 // according to the required alignment in the save area. 5370 else { 5371 uint64_t RegBits = ABIAlign * 8; 5372 uint64_t NumRegs = llvm::alignTo(Bits, RegBits) / RegBits; 5373 llvm::Type *RegTy = llvm::IntegerType::get(getVMContext(), RegBits); 5374 CoerceTy = llvm::ArrayType::get(RegTy, NumRegs); 5375 } 5376 5377 return ABIArgInfo::getDirect(CoerceTy); 5378 } 5379 5380 // All other aggregates are passed ByVal. 5381 return ABIArgInfo::getIndirect(CharUnits::fromQuantity(ABIAlign), 5382 /*ByVal=*/true, 5383 /*Realign=*/TyAlign > ABIAlign); 5384 } 5385 5386 return (isPromotableTypeForABI(Ty) ? 
ABIArgInfo::getExtend(Ty) 5387 : ABIArgInfo::getDirect()); 5388 } 5389 5390 ABIArgInfo 5391 PPC64_SVR4_ABIInfo::classifyReturnType(QualType RetTy) const { 5392 if (RetTy->isVoidType()) 5393 return ABIArgInfo::getIgnore(); 5394 5395 if (RetTy->isAnyComplexType()) 5396 return ABIArgInfo::getDirect(); 5397 5398 // Non-Altivec vector types are returned in GPRs (smaller than 16 bytes) 5399 // or via reference (larger than 16 bytes). 5400 if (RetTy->isVectorType()) { 5401 uint64_t Size = getContext().getTypeSize(RetTy); 5402 if (Size > 128) 5403 return getNaturalAlignIndirect(RetTy); 5404 else if (Size < 128) { 5405 llvm::Type *CoerceTy = llvm::IntegerType::get(getVMContext(), Size); 5406 return ABIArgInfo::getDirect(CoerceTy); 5407 } 5408 } 5409 5410 if (const auto *EIT = RetTy->getAs<BitIntType>()) 5411 if (EIT->getNumBits() > 128) 5412 return getNaturalAlignIndirect(RetTy, /*ByVal=*/false); 5413 5414 if (isAggregateTypeForABI(RetTy)) { 5415 // ELFv2 homogeneous aggregates are returned as array types. 5416 const Type *Base = nullptr; 5417 uint64_t Members = 0; 5418 if (Kind == ELFv2 && 5419 isHomogeneousAggregate(RetTy, Base, Members)) { 5420 llvm::Type *BaseTy = CGT.ConvertType(QualType(Base, 0)); 5421 llvm::Type *CoerceTy = llvm::ArrayType::get(BaseTy, Members); 5422 return ABIArgInfo::getDirect(CoerceTy); 5423 } 5424 5425 // ELFv2 small aggregates are returned in up to two registers. 5426 uint64_t Bits = getContext().getTypeSize(RetTy); 5427 if (Kind == ELFv2 && Bits <= 2 * GPRBits) { 5428 if (Bits == 0) 5429 return ABIArgInfo::getIgnore(); 5430 5431 llvm::Type *CoerceTy; 5432 if (Bits > GPRBits) { 5433 CoerceTy = llvm::IntegerType::get(getVMContext(), GPRBits); 5434 CoerceTy = llvm::StructType::get(CoerceTy, CoerceTy); 5435 } else 5436 CoerceTy = 5437 llvm::IntegerType::get(getVMContext(), llvm::alignTo(Bits, 8)); 5438 return ABIArgInfo::getDirect(CoerceTy); 5439 } 5440 5441 // All other aggregates are returned indirectly. 5442 return getNaturalAlignIndirect(RetTy); 5443 } 5444 5445 return (isPromotableTypeForABI(RetTy) ? ABIArgInfo::getExtend(RetTy) 5446 : ABIArgInfo::getDirect()); 5447 } 5448 5449 // Based on ARMABIInfo::EmitVAArg, adjusted for 64-bit machine. 5450 Address PPC64_SVR4_ABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr, 5451 QualType Ty) const { 5452 auto TypeInfo = getContext().getTypeInfoInChars(Ty); 5453 TypeInfo.Align = getParamTypeAlignment(Ty); 5454 5455 CharUnits SlotSize = CharUnits::fromQuantity(8); 5456 5457 // If we have a complex type and the base type is smaller than 8 bytes, 5458 // the ABI calls for the real and imaginary parts to be right-adjusted 5459 // in separate doublewords. However, Clang expects us to produce a 5460 // pointer to a structure with the two parts packed tightly. So generate 5461 // loads of the real and imaginary parts relative to the va_list pointer, 5462 // and store them to a temporary structure. 5463 if (const ComplexType *CTy = Ty->getAs<ComplexType>()) { 5464 CharUnits EltSize = TypeInfo.Width / 2; 5465 if (EltSize < SlotSize) 5466 return complexTempStructure(CGF, VAListAddr, Ty, SlotSize, EltSize, CTy); 5467 } 5468 5469 // Otherwise, just use the general rule. 
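  // (Arguments are consumed from consecutive 8-byte slots; only the
  // under-sized complex case above needs the repacking detour.)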
5470 return emitVoidPtrVAArg(CGF, VAListAddr, Ty, /*Indirect*/ false, 5471 TypeInfo, SlotSize, /*AllowHigher*/ true); 5472 } 5473 5474 bool 5475 PPC64_SVR4_TargetCodeGenInfo::initDwarfEHRegSizeTable( 5476 CodeGen::CodeGenFunction &CGF, 5477 llvm::Value *Address) const { 5478 return PPC_initDwarfEHRegSizeTable(CGF, Address, /*Is64Bit*/ true, 5479 /*IsAIX*/ false); 5480 } 5481 5482 bool 5483 PPC64TargetCodeGenInfo::initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF, 5484 llvm::Value *Address) const { 5485 return PPC_initDwarfEHRegSizeTable(CGF, Address, /*Is64Bit*/ true, 5486 /*IsAIX*/ false); 5487 } 5488 5489 //===----------------------------------------------------------------------===// 5490 // AArch64 ABI Implementation 5491 //===----------------------------------------------------------------------===// 5492 5493 namespace { 5494 5495 class AArch64ABIInfo : public SwiftABIInfo { 5496 public: 5497 enum ABIKind { 5498 AAPCS = 0, 5499 DarwinPCS, 5500 Win64 5501 }; 5502 5503 private: 5504 ABIKind Kind; 5505 5506 public: 5507 AArch64ABIInfo(CodeGenTypes &CGT, ABIKind Kind) 5508 : SwiftABIInfo(CGT), Kind(Kind) {} 5509 5510 private: 5511 ABIKind getABIKind() const { return Kind; } 5512 bool isDarwinPCS() const { return Kind == DarwinPCS; } 5513 5514 ABIArgInfo classifyReturnType(QualType RetTy, bool IsVariadic) const; 5515 ABIArgInfo classifyArgumentType(QualType RetTy, bool IsVariadic, 5516 unsigned CallingConvention) const; 5517 ABIArgInfo coerceIllegalVector(QualType Ty) const; 5518 bool isHomogeneousAggregateBaseType(QualType Ty) const override; 5519 bool isHomogeneousAggregateSmallEnough(const Type *Ty, 5520 uint64_t Members) const override; 5521 bool isZeroLengthBitfieldPermittedInHomogeneousAggregate() const override; 5522 5523 bool isIllegalVectorType(QualType Ty) const; 5524 5525 void computeInfo(CGFunctionInfo &FI) const override { 5526 if (!::classifyReturnType(getCXXABI(), FI, *this)) 5527 FI.getReturnInfo() = 5528 classifyReturnType(FI.getReturnType(), FI.isVariadic()); 5529 5530 for (auto &it : FI.arguments()) 5531 it.info = classifyArgumentType(it.type, FI.isVariadic(), 5532 FI.getCallingConvention()); 5533 } 5534 5535 Address EmitDarwinVAArg(Address VAListAddr, QualType Ty, 5536 CodeGenFunction &CGF) const; 5537 5538 Address EmitAAPCSVAArg(Address VAListAddr, QualType Ty, 5539 CodeGenFunction &CGF) const; 5540 5541 Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr, 5542 QualType Ty) const override { 5543 llvm::Type *BaseTy = CGF.ConvertType(Ty); 5544 if (isa<llvm::ScalableVectorType>(BaseTy)) 5545 llvm::report_fatal_error("Passing SVE types to variadic functions is " 5546 "currently not supported"); 5547 5548 return Kind == Win64 ? EmitMSVAArg(CGF, VAListAddr, Ty) 5549 : isDarwinPCS() ? 
EmitDarwinVAArg(VAListAddr, Ty, CGF) 5550 : EmitAAPCSVAArg(VAListAddr, Ty, CGF); 5551 } 5552 5553 Address EmitMSVAArg(CodeGenFunction &CGF, Address VAListAddr, 5554 QualType Ty) const override; 5555 5556 bool shouldPassIndirectlyForSwift(ArrayRef<llvm::Type*> scalars, 5557 bool asReturnValue) const override { 5558 return occupiesMoreThan(CGT, scalars, /*total*/ 4); 5559 } 5560 bool isSwiftErrorInRegister() const override { 5561 return true; 5562 } 5563 5564 bool isLegalVectorTypeForSwift(CharUnits totalSize, llvm::Type *eltTy, 5565 unsigned elts) const override; 5566 5567 bool allowBFloatArgsAndRet() const override { 5568 return getTarget().hasBFloat16Type(); 5569 } 5570 }; 5571 5572 class AArch64TargetCodeGenInfo : public TargetCodeGenInfo { 5573 public: 5574 AArch64TargetCodeGenInfo(CodeGenTypes &CGT, AArch64ABIInfo::ABIKind Kind) 5575 : TargetCodeGenInfo(std::make_unique<AArch64ABIInfo>(CGT, Kind)) {} 5576 5577 StringRef getARCRetainAutoreleasedReturnValueMarker() const override { 5578 return "mov\tfp, fp\t\t// marker for objc_retainAutoreleaseReturnValue"; 5579 } 5580 5581 int getDwarfEHStackPointer(CodeGen::CodeGenModule &M) const override { 5582 return 31; 5583 } 5584 5585 bool doesReturnSlotInterfereWithArgs() const override { return false; } 5586 5587 void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV, 5588 CodeGen::CodeGenModule &CGM) const override { 5589 const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D); 5590 if (!FD) 5591 return; 5592 5593 const auto *TA = FD->getAttr<TargetAttr>(); 5594 if (TA == nullptr) 5595 return; 5596 5597 ParsedTargetAttr Attr = TA->parse(); 5598 if (Attr.BranchProtection.empty()) 5599 return; 5600 5601 TargetInfo::BranchProtectionInfo BPI; 5602 StringRef Error; 5603 (void)CGM.getTarget().validateBranchProtection( 5604 Attr.BranchProtection, Attr.Architecture, BPI, Error); 5605 assert(Error.empty()); 5606 5607 auto *Fn = cast<llvm::Function>(GV); 5608 static const char *SignReturnAddrStr[] = {"none", "non-leaf", "all"}; 5609 Fn->addFnAttr("sign-return-address", SignReturnAddrStr[static_cast<int>(BPI.SignReturnAddr)]); 5610 5611 if (BPI.SignReturnAddr != LangOptions::SignReturnAddressScopeKind::None) { 5612 Fn->addFnAttr("sign-return-address-key", 5613 BPI.SignKey == LangOptions::SignReturnAddressKeyKind::AKey 5614 ? "a_key" 5615 : "b_key"); 5616 } 5617 5618 Fn->addFnAttr("branch-target-enforcement", 5619 BPI.BranchTargetEnforcement ? 
"true" : "false"); 5620 } 5621 5622 bool isScalarizableAsmOperand(CodeGen::CodeGenFunction &CGF, 5623 llvm::Type *Ty) const override { 5624 if (CGF.getTarget().hasFeature("ls64")) { 5625 auto *ST = dyn_cast<llvm::StructType>(Ty); 5626 if (ST && ST->getNumElements() == 1) { 5627 auto *AT = dyn_cast<llvm::ArrayType>(ST->getElementType(0)); 5628 if (AT && AT->getNumElements() == 8 && 5629 AT->getElementType()->isIntegerTy(64)) 5630 return true; 5631 } 5632 } 5633 return TargetCodeGenInfo::isScalarizableAsmOperand(CGF, Ty); 5634 } 5635 }; 5636 5637 class WindowsAArch64TargetCodeGenInfo : public AArch64TargetCodeGenInfo { 5638 public: 5639 WindowsAArch64TargetCodeGenInfo(CodeGenTypes &CGT, AArch64ABIInfo::ABIKind K) 5640 : AArch64TargetCodeGenInfo(CGT, K) {} 5641 5642 void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV, 5643 CodeGen::CodeGenModule &CGM) const override; 5644 5645 void getDependentLibraryOption(llvm::StringRef Lib, 5646 llvm::SmallString<24> &Opt) const override { 5647 Opt = "/DEFAULTLIB:" + qualifyWindowsLibrary(Lib); 5648 } 5649 5650 void getDetectMismatchOption(llvm::StringRef Name, llvm::StringRef Value, 5651 llvm::SmallString<32> &Opt) const override { 5652 Opt = "/FAILIFMISMATCH:\"" + Name.str() + "=" + Value.str() + "\""; 5653 } 5654 }; 5655 5656 void WindowsAArch64TargetCodeGenInfo::setTargetAttributes( 5657 const Decl *D, llvm::GlobalValue *GV, CodeGen::CodeGenModule &CGM) const { 5658 AArch64TargetCodeGenInfo::setTargetAttributes(D, GV, CGM); 5659 if (GV->isDeclaration()) 5660 return; 5661 addStackProbeTargetAttributes(D, GV, CGM); 5662 } 5663 } 5664 5665 ABIArgInfo AArch64ABIInfo::coerceIllegalVector(QualType Ty) const { 5666 assert(Ty->isVectorType() && "expected vector type!"); 5667 5668 const auto *VT = Ty->castAs<VectorType>(); 5669 if (VT->getVectorKind() == VectorType::SveFixedLengthPredicateVector) { 5670 assert(VT->getElementType()->isBuiltinType() && "expected builtin type!"); 5671 assert(VT->getElementType()->castAs<BuiltinType>()->getKind() == 5672 BuiltinType::UChar && 5673 "unexpected builtin type for SVE predicate!"); 5674 return ABIArgInfo::getDirect(llvm::ScalableVectorType::get( 5675 llvm::Type::getInt1Ty(getVMContext()), 16)); 5676 } 5677 5678 if (VT->getVectorKind() == VectorType::SveFixedLengthDataVector) { 5679 assert(VT->getElementType()->isBuiltinType() && "expected builtin type!"); 5680 5681 const auto *BT = VT->getElementType()->castAs<BuiltinType>(); 5682 llvm::ScalableVectorType *ResType = nullptr; 5683 switch (BT->getKind()) { 5684 default: 5685 llvm_unreachable("unexpected builtin type for SVE vector!"); 5686 case BuiltinType::SChar: 5687 case BuiltinType::UChar: 5688 ResType = llvm::ScalableVectorType::get( 5689 llvm::Type::getInt8Ty(getVMContext()), 16); 5690 break; 5691 case BuiltinType::Short: 5692 case BuiltinType::UShort: 5693 ResType = llvm::ScalableVectorType::get( 5694 llvm::Type::getInt16Ty(getVMContext()), 8); 5695 break; 5696 case BuiltinType::Int: 5697 case BuiltinType::UInt: 5698 ResType = llvm::ScalableVectorType::get( 5699 llvm::Type::getInt32Ty(getVMContext()), 4); 5700 break; 5701 case BuiltinType::Long: 5702 case BuiltinType::ULong: 5703 ResType = llvm::ScalableVectorType::get( 5704 llvm::Type::getInt64Ty(getVMContext()), 2); 5705 break; 5706 case BuiltinType::Half: 5707 ResType = llvm::ScalableVectorType::get( 5708 llvm::Type::getHalfTy(getVMContext()), 8); 5709 break; 5710 case BuiltinType::Float: 5711 ResType = llvm::ScalableVectorType::get( 5712 llvm::Type::getFloatTy(getVMContext()), 4); 5713 break; 5714 
    case BuiltinType::Double:
5715       ResType = llvm::ScalableVectorType::get(
5716           llvm::Type::getDoubleTy(getVMContext()), 2);
5717       break;
5718     case BuiltinType::BFloat16:
5719       ResType = llvm::ScalableVectorType::get(
5720           llvm::Type::getBFloatTy(getVMContext()), 8);
5721       break;
5722     }
5723     return ABIArgInfo::getDirect(ResType);
5724   }
5725
5726   uint64_t Size = getContext().getTypeSize(Ty);
5727   // Android promotes <2 x i8> to i16, not i32.
5728   if (isAndroid() && (Size <= 16)) {
5729     llvm::Type *ResType = llvm::Type::getInt16Ty(getVMContext());
5730     return ABIArgInfo::getDirect(ResType);
5731   }
5732   if (Size <= 32) {
5733     llvm::Type *ResType = llvm::Type::getInt32Ty(getVMContext());
5734     return ABIArgInfo::getDirect(ResType);
5735   }
5736   if (Size == 64) {
5737     auto *ResType =
5738         llvm::FixedVectorType::get(llvm::Type::getInt32Ty(getVMContext()), 2);
5739     return ABIArgInfo::getDirect(ResType);
5740   }
5741   if (Size == 128) {
5742     auto *ResType =
5743         llvm::FixedVectorType::get(llvm::Type::getInt32Ty(getVMContext()), 4);
5744     return ABIArgInfo::getDirect(ResType);
5745   }
5746   return getNaturalAlignIndirect(Ty, /*ByVal=*/false);
5747 }
5748
5749 ABIArgInfo
5750 AArch64ABIInfo::classifyArgumentType(QualType Ty, bool IsVariadic,
5751                                      unsigned CallingConvention) const {
5752   Ty = useFirstFieldIfTransparentUnion(Ty);
5753
5754   // Handle illegal vector types here.
5755   if (isIllegalVectorType(Ty))
5756     return coerceIllegalVector(Ty);
5757
5758   if (!isAggregateTypeForABI(Ty)) {
5759     // Treat an enum type as its underlying type.
5760     if (const EnumType *EnumTy = Ty->getAs<EnumType>())
5761       Ty = EnumTy->getDecl()->getIntegerType();
5762
5763     if (const auto *EIT = Ty->getAs<BitIntType>())
5764       if (EIT->getNumBits() > 128)
5765         return getNaturalAlignIndirect(Ty);
5766
5767     return (isPromotableIntegerTypeForABI(Ty) && isDarwinPCS()
5768                 ? ABIArgInfo::getExtend(Ty)
5769                 : ABIArgInfo::getDirect());
5770   }
5771
5772   // Structures with either a non-trivial destructor or a non-trivial
5773   // copy constructor are always indirect.
5774   if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, getCXXABI())) {
5775     return getNaturalAlignIndirect(Ty, /*ByVal=*/RAA ==
5776                                            CGCXXABI::RAA_DirectInMemory);
5777   }
5778
5779   // Empty records are always ignored on Darwin, but actually passed in C++ mode
5780   // elsewhere for GNU compatibility.
5781   uint64_t Size = getContext().getTypeSize(Ty);
5782   bool IsEmpty = isEmptyRecord(getContext(), Ty, true);
5783   if (IsEmpty || Size == 0) {
5784     if (!getContext().getLangOpts().CPlusPlus || isDarwinPCS())
5785       return ABIArgInfo::getIgnore();
5786
5787     // GNU C++ mode. The only argument that gets ignored is an empty one with
5788     // size 0.
5789     if (IsEmpty && Size == 0)
5790       return ABIArgInfo::getIgnore();
5791     return ABIArgInfo::getDirect(llvm::Type::getInt8Ty(getVMContext()));
5792   }
5793
5794   // Homogeneous Floating-point Aggregates (HFAs) need to be expanded.
5795   const Type *Base = nullptr;
5796   uint64_t Members = 0;
5797   bool IsWin64 = Kind == Win64 || CallingConvention == llvm::CallingConv::Win64;
5798   bool IsWinVariadic = IsWin64 && IsVariadic;
5799   // In variadic functions on Windows, all composite types are treated alike,
5800   // no special handling of HFAs/HVAs.
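  // Illustrative HFA: for 'struct HFA { double a, b, c; };' the walk below
  // yields Base == double and Members == 3, and the argument is passed
  // directly as [3 x double].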
5801 if (!IsWinVariadic && isHomogeneousAggregate(Ty, Base, Members)) { 5802 if (Kind != AArch64ABIInfo::AAPCS) 5803 return ABIArgInfo::getDirect( 5804 llvm::ArrayType::get(CGT.ConvertType(QualType(Base, 0)), Members)); 5805 5806 // For alignment adjusted HFAs, cap the argument alignment to 16, leave it 5807 // default otherwise. 5808 unsigned Align = 5809 getContext().getTypeUnadjustedAlignInChars(Ty).getQuantity(); 5810 unsigned BaseAlign = getContext().getTypeAlignInChars(Base).getQuantity(); 5811 Align = (Align > BaseAlign && Align >= 16) ? 16 : 0; 5812 return ABIArgInfo::getDirect( 5813 llvm::ArrayType::get(CGT.ConvertType(QualType(Base, 0)), Members), 0, 5814 nullptr, true, Align); 5815 } 5816 5817 // Aggregates <= 16 bytes are passed directly in registers or on the stack. 5818 if (Size <= 128) { 5819 // On RenderScript, coerce Aggregates <= 16 bytes to an integer array of 5820 // same size and alignment. 5821 if (getTarget().isRenderScriptTarget()) { 5822 return coerceToIntArray(Ty, getContext(), getVMContext()); 5823 } 5824 unsigned Alignment; 5825 if (Kind == AArch64ABIInfo::AAPCS) { 5826 Alignment = getContext().getTypeUnadjustedAlign(Ty); 5827 Alignment = Alignment < 128 ? 64 : 128; 5828 } else { 5829 Alignment = std::max(getContext().getTypeAlign(Ty), 5830 (unsigned)getTarget().getPointerWidth(0)); 5831 } 5832 Size = llvm::alignTo(Size, Alignment); 5833 5834 // We use a pair of i64 for 16-byte aggregate with 8-byte alignment. 5835 // For aggregates with 16-byte alignment, we use i128. 5836 llvm::Type *BaseTy = llvm::Type::getIntNTy(getVMContext(), Alignment); 5837 return ABIArgInfo::getDirect( 5838 Size == Alignment ? BaseTy 5839 : llvm::ArrayType::get(BaseTy, Size / Alignment)); 5840 } 5841 5842 return getNaturalAlignIndirect(Ty, /*ByVal=*/false); 5843 } 5844 5845 ABIArgInfo AArch64ABIInfo::classifyReturnType(QualType RetTy, 5846 bool IsVariadic) const { 5847 if (RetTy->isVoidType()) 5848 return ABIArgInfo::getIgnore(); 5849 5850 if (const auto *VT = RetTy->getAs<VectorType>()) { 5851 if (VT->getVectorKind() == VectorType::SveFixedLengthDataVector || 5852 VT->getVectorKind() == VectorType::SveFixedLengthPredicateVector) 5853 return coerceIllegalVector(RetTy); 5854 } 5855 5856 // Large vector types should be returned via memory. 5857 if (RetTy->isVectorType() && getContext().getTypeSize(RetTy) > 128) 5858 return getNaturalAlignIndirect(RetTy); 5859 5860 if (!isAggregateTypeForABI(RetTy)) { 5861 // Treat an enum type as its underlying type. 5862 if (const EnumType *EnumTy = RetTy->getAs<EnumType>()) 5863 RetTy = EnumTy->getDecl()->getIntegerType(); 5864 5865 if (const auto *EIT = RetTy->getAs<BitIntType>()) 5866 if (EIT->getNumBits() > 128) 5867 return getNaturalAlignIndirect(RetTy); 5868 5869 return (isPromotableIntegerTypeForABI(RetTy) && isDarwinPCS() 5870 ? ABIArgInfo::getExtend(RetTy) 5871 : ABIArgInfo::getDirect()); 5872 } 5873 5874 uint64_t Size = getContext().getTypeSize(RetTy); 5875 if (isEmptyRecord(getContext(), RetTy, true) || Size == 0) 5876 return ABIArgInfo::getIgnore(); 5877 5878 const Type *Base = nullptr; 5879 uint64_t Members = 0; 5880 if (isHomogeneousAggregate(RetTy, Base, Members) && 5881 !(getTarget().getTriple().getArch() == llvm::Triple::aarch64_32 && 5882 IsVariadic)) 5883 // Homogeneous Floating-point Aggregates (HFAs) are returned directly. 5884 return ABIArgInfo::getDirect(); 5885 5886 // Aggregates <= 16 bytes are returned directly in registers or on the stack. 
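  // e.g. (illustration) 'struct { int a, b; };' is 8 bytes and is returned
  // as i64 on little-endian targets by the code below.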
5887 if (Size <= 128) { 5888 // On RenderScript, coerce Aggregates <= 16 bytes to an integer array of 5889 // same size and alignment. 5890 if (getTarget().isRenderScriptTarget()) { 5891 return coerceToIntArray(RetTy, getContext(), getVMContext()); 5892 } 5893 5894 if (Size <= 64 && getDataLayout().isLittleEndian()) { 5895 // Composite types are returned in lower bits of a 64-bit register for LE, 5896 // and in higher bits for BE. However, integer types are always returned 5897 // in lower bits for both LE and BE, and they are not rounded up to 5898 // 64-bits. We can skip rounding up of composite types for LE, but not for 5899 // BE, otherwise composite types will be indistinguishable from integer 5900 // types. 5901 return ABIArgInfo::getDirect( 5902 llvm::IntegerType::get(getVMContext(), Size)); 5903 } 5904 5905 unsigned Alignment = getContext().getTypeAlign(RetTy); 5906 Size = llvm::alignTo(Size, 64); // round up to multiple of 8 bytes 5907 5908 // We use a pair of i64 for 16-byte aggregate with 8-byte alignment. 5909 // For aggregates with 16-byte alignment, we use i128. 5910 if (Alignment < 128 && Size == 128) { 5911 llvm::Type *BaseTy = llvm::Type::getInt64Ty(getVMContext()); 5912 return ABIArgInfo::getDirect(llvm::ArrayType::get(BaseTy, Size / 64)); 5913 } 5914 return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(), Size)); 5915 } 5916 5917 return getNaturalAlignIndirect(RetTy); 5918 } 5919 5920 /// isIllegalVectorType - check whether the vector type is legal for AArch64. 5921 bool AArch64ABIInfo::isIllegalVectorType(QualType Ty) const { 5922 if (const VectorType *VT = Ty->getAs<VectorType>()) { 5923 // Check whether VT is a fixed-length SVE vector. These types are 5924 // represented as scalable vectors in function args/return and must be 5925 // coerced from fixed vectors. 5926 if (VT->getVectorKind() == VectorType::SveFixedLengthDataVector || 5927 VT->getVectorKind() == VectorType::SveFixedLengthPredicateVector) 5928 return true; 5929 5930 // Check whether VT is legal. 5931 unsigned NumElements = VT->getNumElements(); 5932 uint64_t Size = getContext().getTypeSize(VT); 5933 // NumElements should be power of 2. 5934 if (!llvm::isPowerOf2_32(NumElements)) 5935 return true; 5936 5937 // arm64_32 has to be compatible with the ARM logic here, which allows huge 5938 // vectors for some reason. 5939 llvm::Triple Triple = getTarget().getTriple(); 5940 if (Triple.getArch() == llvm::Triple::aarch64_32 && 5941 Triple.isOSBinFormatMachO()) 5942 return Size <= 32; 5943 5944 return Size != 64 && (Size != 128 || NumElements == 1); 5945 } 5946 return false; 5947 } 5948 5949 bool AArch64ABIInfo::isLegalVectorTypeForSwift(CharUnits totalSize, 5950 llvm::Type *eltTy, 5951 unsigned elts) const { 5952 if (!llvm::isPowerOf2_32(elts)) 5953 return false; 5954 if (totalSize.getQuantity() != 8 && 5955 (totalSize.getQuantity() != 16 || elts == 1)) 5956 return false; 5957 return true; 5958 } 5959 5960 bool AArch64ABIInfo::isHomogeneousAggregateBaseType(QualType Ty) const { 5961 // Homogeneous aggregates for AAPCS64 must have base types of a floating 5962 // point type or a short-vector type. This is the same as the 32-bit ABI, 5963 // but with the difference that any floating-point type is allowed, 5964 // including __fp16. 
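  // So, for instance, 'struct { __fp16 a, b, c, d; };' qualifies as an HFA
  // here even though AAPCS32 would reject __fp16 as a base type.
  // (Illustrative example.)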
5965 if (const BuiltinType *BT = Ty->getAs<BuiltinType>()) { 5966 if (BT->isFloatingPoint()) 5967 return true; 5968 } else if (const VectorType *VT = Ty->getAs<VectorType>()) { 5969 unsigned VecSize = getContext().getTypeSize(VT); 5970 if (VecSize == 64 || VecSize == 128) 5971 return true; 5972 } 5973 return false; 5974 } 5975 5976 bool AArch64ABIInfo::isHomogeneousAggregateSmallEnough(const Type *Base, 5977 uint64_t Members) const { 5978 return Members <= 4; 5979 } 5980 5981 bool AArch64ABIInfo::isZeroLengthBitfieldPermittedInHomogeneousAggregate() 5982 const { 5983 // AAPCS64 says that the rule for whether something is a homogeneous 5984 // aggregate is applied to the output of the data layout decision. So 5985 // anything that doesn't affect the data layout also does not affect 5986 // homogeneity. In particular, zero-length bitfields don't stop a struct 5987 // being homogeneous. 5988 return true; 5989 } 5990 5991 Address AArch64ABIInfo::EmitAAPCSVAArg(Address VAListAddr, QualType Ty, 5992 CodeGenFunction &CGF) const { 5993 ABIArgInfo AI = classifyArgumentType(Ty, /*IsVariadic=*/true, 5994 CGF.CurFnInfo->getCallingConvention()); 5995 bool IsIndirect = AI.isIndirect(); 5996 5997 llvm::Type *BaseTy = CGF.ConvertType(Ty); 5998 if (IsIndirect) 5999 BaseTy = llvm::PointerType::getUnqual(BaseTy); 6000 else if (AI.getCoerceToType()) 6001 BaseTy = AI.getCoerceToType(); 6002 6003 unsigned NumRegs = 1; 6004 if (llvm::ArrayType *ArrTy = dyn_cast<llvm::ArrayType>(BaseTy)) { 6005 BaseTy = ArrTy->getElementType(); 6006 NumRegs = ArrTy->getNumElements(); 6007 } 6008 bool IsFPR = BaseTy->isFloatingPointTy() || BaseTy->isVectorTy(); 6009 6010 // The AArch64 va_list type and handling is specified in the Procedure Call 6011 // Standard, section B.4: 6012 // 6013 // struct { 6014 // void *__stack; 6015 // void *__gr_top; 6016 // void *__vr_top; 6017 // int __gr_offs; 6018 // int __vr_offs; 6019 // }; 6020 6021 llvm::BasicBlock *MaybeRegBlock = CGF.createBasicBlock("vaarg.maybe_reg"); 6022 llvm::BasicBlock *InRegBlock = CGF.createBasicBlock("vaarg.in_reg"); 6023 llvm::BasicBlock *OnStackBlock = CGF.createBasicBlock("vaarg.on_stack"); 6024 llvm::BasicBlock *ContBlock = CGF.createBasicBlock("vaarg.end"); 6025 6026 CharUnits TySize = getContext().getTypeSizeInChars(Ty); 6027 CharUnits TyAlign = getContext().getTypeUnadjustedAlignInChars(Ty); 6028 6029 Address reg_offs_p = Address::invalid(); 6030 llvm::Value *reg_offs = nullptr; 6031 int reg_top_index; 6032 int RegSize = IsIndirect ? 8 : TySize.getQuantity(); 6033 if (!IsFPR) { 6034 // 3 is the field number of __gr_offs 6035 reg_offs_p = CGF.Builder.CreateStructGEP(VAListAddr, 3, "gr_offs_p"); 6036 reg_offs = CGF.Builder.CreateLoad(reg_offs_p, "gr_offs"); 6037 reg_top_index = 1; // field number for __gr_top 6038 RegSize = llvm::alignTo(RegSize, 8); 6039 } else { 6040 // 4 is the field number of __vr_offs. 6041 reg_offs_p = CGF.Builder.CreateStructGEP(VAListAddr, 4, "vr_offs_p"); 6042 reg_offs = CGF.Builder.CreateLoad(reg_offs_p, "vr_offs"); 6043 reg_top_index = 2; // field number for __vr_top 6044 RegSize = 16 * NumRegs; 6045 } 6046 6047 //======================================= 6048 // Find out where argument was passed 6049 //======================================= 6050 6051 // If reg_offs >= 0 we're already using the stack for this type of 6052 // argument. We don't want to keep updating reg_offs (in case it overflows, 6053 // though anyone passing 2GB of arguments, each at most 16 bytes, deserves 6054 // whatever they get). 
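  // (Per AAPCS64 section B.4, __gr_offs/__vr_offs hold negative offsets from
  // __gr_top/__vr_top while save-area registers remain, reaching zero exactly
  // when the register save area for that class is exhausted.)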
6055   llvm::Value *UsingStack = nullptr;
6056   UsingStack = CGF.Builder.CreateICmpSGE(
6057       reg_offs, llvm::ConstantInt::get(CGF.Int32Ty, 0));
6058
6059   CGF.Builder.CreateCondBr(UsingStack, OnStackBlock, MaybeRegBlock);
6060
6061   // Otherwise, at least some kind of argument could go in these registers;
6062   // the question is whether this particular type is too big.
6063   CGF.EmitBlock(MaybeRegBlock);
6064
6065   // Integer arguments may need their register numbers aligned (for example a
6066   // "struct { __int128 a; };" gets passed in x_2N, x_{2N+1}). In this case we
6067   // align __gr_offs to calculate the potential address.
6068   if (!IsFPR && !IsIndirect && TyAlign.getQuantity() > 8) {
6069     int Align = TyAlign.getQuantity();
6070
6071     reg_offs = CGF.Builder.CreateAdd(
6072         reg_offs, llvm::ConstantInt::get(CGF.Int32Ty, Align - 1),
6073         "align_regoffs");
6074     reg_offs = CGF.Builder.CreateAnd(
6075         reg_offs, llvm::ConstantInt::get(CGF.Int32Ty, -Align),
6076         "aligned_regoffs");
6077   }
6078
6079   // Update gr_offs/vr_offs for the next call to va_arg on this va_list.
6080   // The fact that this is done unconditionally reflects the fact that
6081   // allocating an argument to the stack also uses up all the remaining
6082   // registers of the appropriate kind.
6083   llvm::Value *NewOffset = nullptr;
6084   NewOffset = CGF.Builder.CreateAdd(
6085       reg_offs, llvm::ConstantInt::get(CGF.Int32Ty, RegSize), "new_reg_offs");
6086   CGF.Builder.CreateStore(NewOffset, reg_offs_p);
6087
6088   // Now we're in a position to decide whether this argument really was in
6089   // registers or not.
6090   llvm::Value *InRegs = nullptr;
6091   InRegs = CGF.Builder.CreateICmpSLE(
6092       NewOffset, llvm::ConstantInt::get(CGF.Int32Ty, 0), "inreg");
6093
6094   CGF.Builder.CreateCondBr(InRegs, InRegBlock, OnStackBlock);
6095
6096   //=======================================
6097   // Argument was in registers
6098   //=======================================
6099
6100   // Now we emit the code for the case where the argument was originally
6101   // passed in registers. First start the appropriate block:
6102   CGF.EmitBlock(InRegBlock);
6103
6104   llvm::Value *reg_top = nullptr;
6105   Address reg_top_p =
6106       CGF.Builder.CreateStructGEP(VAListAddr, reg_top_index, "reg_top_p");
6107   reg_top = CGF.Builder.CreateLoad(reg_top_p, "reg_top");
6108   Address BaseAddr(CGF.Builder.CreateInBoundsGEP(CGF.Int8Ty, reg_top, reg_offs),
6109                    CGF.Int8Ty, CharUnits::fromQuantity(IsFPR ? 16 : 8));
6110   Address RegAddr = Address::invalid();
6111   llvm::Type *MemTy = CGF.ConvertTypeForMem(Ty), *ElementTy = MemTy;
6112
6113   if (IsIndirect) {
6114     // If it's been passed indirectly (actually a struct), whatever we find from
6115     // stored registers or on the stack will actually be a struct **.
6116     MemTy = llvm::PointerType::getUnqual(MemTy);
6117   }
6118
6119   const Type *Base = nullptr;
6120   uint64_t NumMembers = 0;
6121   bool IsHFA = isHomogeneousAggregate(Ty, Base, NumMembers);
6122   if (IsHFA && NumMembers > 1) {
6123     // Homogeneous aggregates passed in registers will have their elements split
6124     // and stored 16 bytes apart regardless of size (they're notionally in qN,
6125     // qN+1, ...). We reload and store into a temporary local variable
6126     // contiguously.
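    // For instance (illustrative), an HFA 'struct { float x, y; };' passed in
    // s0/s1 has its parts at save-area offsets 0 and 16; the loop below copies
    // them into an adjacent pair of floats in the temporary.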
    assert(!IsIndirect && "Homogeneous aggregates should be passed directly");
    auto BaseTyInfo = getContext().getTypeInfoInChars(QualType(Base, 0));
    llvm::Type *BaseTy = CGF.ConvertType(QualType(Base, 0));
    llvm::Type *HFATy = llvm::ArrayType::get(BaseTy, NumMembers);
    Address Tmp = CGF.CreateTempAlloca(HFATy,
                                       std::max(TyAlign, BaseTyInfo.Align));

    // On big-endian platforms, the value will be right-aligned in its slot.
    int Offset = 0;
    if (CGF.CGM.getDataLayout().isBigEndian() &&
        BaseTyInfo.Width.getQuantity() < 16)
      Offset = 16 - BaseTyInfo.Width.getQuantity();

    for (unsigned i = 0; i < NumMembers; ++i) {
      CharUnits BaseOffset = CharUnits::fromQuantity(16 * i + Offset);
      Address LoadAddr =
          CGF.Builder.CreateConstInBoundsByteGEP(BaseAddr, BaseOffset);
      LoadAddr = CGF.Builder.CreateElementBitCast(LoadAddr, BaseTy);

      Address StoreAddr = CGF.Builder.CreateConstArrayGEP(Tmp, i);

      llvm::Value *Elem = CGF.Builder.CreateLoad(LoadAddr);
      CGF.Builder.CreateStore(Elem, StoreAddr);
    }

    RegAddr = CGF.Builder.CreateElementBitCast(Tmp, MemTy);
  } else {
    // Otherwise the object is contiguous in memory.

    // It might be right-aligned in its slot.
    CharUnits SlotSize = BaseAddr.getAlignment();
    if (CGF.CGM.getDataLayout().isBigEndian() && !IsIndirect &&
        (IsHFA || !isAggregateTypeForABI(Ty)) &&
        TySize < SlotSize) {
      CharUnits Offset = SlotSize - TySize;
      BaseAddr = CGF.Builder.CreateConstInBoundsByteGEP(BaseAddr, Offset);
    }

    RegAddr = CGF.Builder.CreateElementBitCast(BaseAddr, MemTy);
  }

  CGF.EmitBranch(ContBlock);

  //=======================================
  // Argument was on the stack
  //=======================================
  CGF.EmitBlock(OnStackBlock);

  Address stack_p = CGF.Builder.CreateStructGEP(VAListAddr, 0, "stack_p");
  llvm::Value *OnStackPtr = CGF.Builder.CreateLoad(stack_p, "stack");

  // Again, stack arguments may need realignment. In this case both integer
  // and floating-point ones might be affected.
  if (!IsIndirect && TyAlign.getQuantity() > 8) {
    int Align = TyAlign.getQuantity();

    OnStackPtr = CGF.Builder.CreatePtrToInt(OnStackPtr, CGF.Int64Ty);

    OnStackPtr = CGF.Builder.CreateAdd(
        OnStackPtr, llvm::ConstantInt::get(CGF.Int64Ty, Align - 1),
        "align_stack");
    OnStackPtr = CGF.Builder.CreateAnd(
        OnStackPtr, llvm::ConstantInt::get(CGF.Int64Ty, -Align),
        "align_stack");

    OnStackPtr = CGF.Builder.CreateIntToPtr(OnStackPtr, CGF.Int8PtrTy);
  }
  Address OnStackAddr = Address(OnStackPtr, CGF.Int8Ty,
                                std::max(CharUnits::fromQuantity(8), TyAlign));

  // All stack slots are multiples of 8 bytes.
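  // For example, a 12-byte struct consumes a 16-byte slot below, while an
  // indirectly passed type always consumes a single 8-byte pointer slot.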
  CharUnits StackSlotSize = CharUnits::fromQuantity(8);
  CharUnits StackSize;
  if (IsIndirect)
    StackSize = StackSlotSize;
  else
    StackSize = TySize.alignTo(StackSlotSize);

  llvm::Value *StackSizeC = CGF.Builder.getSize(StackSize);
  llvm::Value *NewStack = CGF.Builder.CreateInBoundsGEP(
      CGF.Int8Ty, OnStackPtr, StackSizeC, "new_stack");

  // Write the new value of __stack for the next call to va_arg.
  CGF.Builder.CreateStore(NewStack, stack_p);

  if (CGF.CGM.getDataLayout().isBigEndian() && !isAggregateTypeForABI(Ty) &&
      TySize < StackSlotSize) {
    CharUnits Offset = StackSlotSize - TySize;
    OnStackAddr = CGF.Builder.CreateConstInBoundsByteGEP(OnStackAddr, Offset);
  }

  OnStackAddr = CGF.Builder.CreateElementBitCast(OnStackAddr, MemTy);

  CGF.EmitBranch(ContBlock);

  //=======================================
  // Tidy up
  //=======================================
  CGF.EmitBlock(ContBlock);

  Address ResAddr = emitMergePHI(CGF, RegAddr, InRegBlock, OnStackAddr,
                                 OnStackBlock, "vaargs.addr");

  if (IsIndirect)
    return Address(CGF.Builder.CreateLoad(ResAddr, "vaarg.addr"), ElementTy,
                   TyAlign);

  return ResAddr;
}

Address AArch64ABIInfo::EmitDarwinVAArg(Address VAListAddr, QualType Ty,
                                        CodeGenFunction &CGF) const {
  // The backend's lowering doesn't support va_arg for aggregates or
  // illegal vector types. Lower VAArg here for these cases and use
  // the LLVM va_arg instruction for everything else.
  if (!isAggregateTypeForABI(Ty) && !isIllegalVectorType(Ty))
    return EmitVAArgInstr(CGF, VAListAddr, Ty, ABIArgInfo::getDirect());

  uint64_t PointerSize = getTarget().getPointerWidth(0) / 8;
  CharUnits SlotSize = CharUnits::fromQuantity(PointerSize);

  // Empty records are ignored for parameter passing purposes.
  if (isEmptyRecord(getContext(), Ty, true)) {
    Address Addr = Address(CGF.Builder.CreateLoad(VAListAddr, "ap.cur"),
                           getVAListElementType(CGF), SlotSize);
    Addr = CGF.Builder.CreateElementBitCast(Addr, CGF.ConvertTypeForMem(Ty));
    return Addr;
  }

  // The size of the actual thing passed, which might end up just
  // being a pointer for indirect types.
  auto TyInfo = getContext().getTypeInfoInChars(Ty);

  // Arguments bigger than 16 bytes which aren't homogeneous
  // aggregates should be passed indirectly.
  bool IsIndirect = false;
  if (TyInfo.Width.getQuantity() > 16) {
    const Type *Base = nullptr;
    uint64_t Members = 0;
    IsIndirect = !isHomogeneousAggregate(Ty, Base, Members);
  }

  return emitVoidPtrVAArg(CGF, VAListAddr, Ty, IsIndirect,
                          TyInfo, SlotSize, /*AllowHigherAlign*/ true);
}

Address AArch64ABIInfo::EmitMSVAArg(CodeGenFunction &CGF, Address VAListAddr,
                                    QualType Ty) const {
  bool IsIndirect = false;

  // Composites larger than 16 bytes are passed by reference.
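  // Illustrative: for a 24-byte struct, va_arg loads a pointer to a
  // caller-allocated copy; a 16-byte struct is still read directly out of
  // the argument area.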
  if (isAggregateTypeForABI(Ty) && getContext().getTypeSize(Ty) > 128)
    IsIndirect = true;

  return emitVoidPtrVAArg(CGF, VAListAddr, Ty, IsIndirect,
                          CGF.getContext().getTypeInfoInChars(Ty),
                          CharUnits::fromQuantity(8),
                          /*allowHigherAlign*/ false);
}

//===----------------------------------------------------------------------===//
// ARM ABI Implementation
//===----------------------------------------------------------------------===//

namespace {

class ARMABIInfo : public SwiftABIInfo {
public:
  enum ABIKind {
    APCS = 0,
    AAPCS = 1,
    AAPCS_VFP = 2,
    AAPCS16_VFP = 3,
  };

private:
  ABIKind Kind;
  bool IsFloatABISoftFP;

public:
  ARMABIInfo(CodeGenTypes &CGT, ABIKind _Kind)
      : SwiftABIInfo(CGT), Kind(_Kind) {
    setCCs();
    IsFloatABISoftFP = CGT.getCodeGenOpts().FloatABI == "softfp" ||
                       CGT.getCodeGenOpts().FloatABI == ""; // default
  }

  bool isEABI() const {
    switch (getTarget().getTriple().getEnvironment()) {
    case llvm::Triple::Android:
    case llvm::Triple::EABI:
    case llvm::Triple::EABIHF:
    case llvm::Triple::GNUEABI:
    case llvm::Triple::GNUEABIHF:
    case llvm::Triple::MuslEABI:
    case llvm::Triple::MuslEABIHF:
      return true;
    default:
      return false;
    }
  }

  bool isEABIHF() const {
    switch (getTarget().getTriple().getEnvironment()) {
    case llvm::Triple::EABIHF:
    case llvm::Triple::GNUEABIHF:
    case llvm::Triple::MuslEABIHF:
      return true;
    default:
      return false;
    }
  }

  ABIKind getABIKind() const { return Kind; }

  bool allowBFloatArgsAndRet() const override {
    return !IsFloatABISoftFP && getTarget().hasBFloat16Type();
  }

private:
  ABIArgInfo classifyReturnType(QualType RetTy, bool isVariadic,
                                unsigned functionCallConv) const;
  ABIArgInfo classifyArgumentType(QualType RetTy, bool isVariadic,
                                  unsigned functionCallConv) const;
  ABIArgInfo classifyHomogeneousAggregate(QualType Ty, const Type *Base,
                                          uint64_t Members) const;
  ABIArgInfo coerceIllegalVector(QualType Ty) const;
  bool isIllegalVectorType(QualType Ty) const;
  bool containsAnyFP16Vectors(QualType Ty) const;

  bool isHomogeneousAggregateBaseType(QualType Ty) const override;
  bool isHomogeneousAggregateSmallEnough(const Type *Ty,
                                         uint64_t Members) const override;
  bool isZeroLengthBitfieldPermittedInHomogeneousAggregate() const override;

  bool isEffectivelyAAPCS_VFP(unsigned callConvention, bool acceptHalf) const;

  void computeInfo(CGFunctionInfo &FI) const override;

  Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
                    QualType Ty) const override;

  llvm::CallingConv::ID getLLVMDefaultCC() const;
  llvm::CallingConv::ID getABIDefaultCC() const;
  void setCCs();

  bool shouldPassIndirectlyForSwift(ArrayRef<llvm::Type *> scalars,
                                    bool asReturnValue) const override {
    return occupiesMoreThan(CGT, scalars, /*total*/ 4);
  }
  bool isSwiftErrorInRegister() const override {
    return true;
  }
  bool isLegalVectorTypeForSwift(CharUnits totalSize, llvm::Type *eltTy,
                                 unsigned elts) const override;
};

class ARMTargetCodeGenInfo : public TargetCodeGenInfo {
public:
  ARMTargetCodeGenInfo(CodeGenTypes &CGT, ARMABIInfo::ABIKind K)
      : TargetCodeGenInfo(std::make_unique<ARMABIInfo>(CGT, K)) {}

  const ARMABIInfo &getABIInfo() const {
    return static_cast<const ARMABIInfo &>(TargetCodeGenInfo::getABIInfo());
  }

  int getDwarfEHStackPointer(CodeGen::CodeGenModule &M) const override {
    return 13;
  }

  StringRef getARCRetainAutoreleasedReturnValueMarker() const override {
    return "mov\tr7, r7\t\t// marker for objc_retainAutoreleaseReturnValue";
  }

  bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
                               llvm::Value *Address) const override {
    llvm::Value *Four8 = llvm::ConstantInt::get(CGF.Int8Ty, 4);

    // 0-15 are the 16 integer registers.
    AssignToArrayRange(CGF.Builder, Address, Four8, 0, 15);
    return false;
  }

  unsigned getSizeOfUnwindException() const override {
    if (getABIInfo().isEABI())
      return 88;
    return TargetCodeGenInfo::getSizeOfUnwindException();
  }

  void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
                           CodeGen::CodeGenModule &CGM) const override {
    if (GV->isDeclaration())
      return;
    const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D);
    if (!FD)
      return;
    auto *Fn = cast<llvm::Function>(GV);

    if (const auto *TA = FD->getAttr<TargetAttr>()) {
      ParsedTargetAttr Attr = TA->parse();
      if (!Attr.BranchProtection.empty()) {
        TargetInfo::BranchProtectionInfo BPI;
        StringRef DiagMsg;
        StringRef Arch = Attr.Architecture.empty()
                             ? CGM.getTarget().getTargetOpts().CPU
                             : Attr.Architecture;
        if (!CGM.getTarget().validateBranchProtection(Attr.BranchProtection,
                                                      Arch, BPI, DiagMsg)) {
          CGM.getDiags().Report(
              D->getLocation(),
              diag::warn_target_unsupported_branch_protection_attribute)
              << Arch;
        } else {
          static const char *SignReturnAddrStr[] = {"none", "non-leaf", "all"};
          assert(static_cast<unsigned>(BPI.SignReturnAddr) <= 2 &&
                 "Unexpected SignReturnAddressScopeKind");
          Fn->addFnAttr(
              "sign-return-address",
              SignReturnAddrStr[static_cast<int>(BPI.SignReturnAddr)]);

          Fn->addFnAttr("branch-target-enforcement",
                        BPI.BranchTargetEnforcement ? "true" : "false");
        }
      } else if (CGM.getLangOpts().BranchTargetEnforcement ||
                 CGM.getLangOpts().hasSignReturnAddress()) {
        // If the Branch Protection attribute is missing, validate the target
        // Architecture attribute against Branch Protection command line
        // settings.
        if (!CGM.getTarget().isBranchProtectionSupportedArch(Attr.Architecture))
          CGM.getDiags().Report(
              D->getLocation(),
              diag::warn_target_unsupported_branch_protection_attribute)
              << Attr.Architecture;
      }
    }

    const ARMInterruptAttr *Attr = FD->getAttr<ARMInterruptAttr>();
    if (!Attr)
      return;

    const char *Kind;
    switch (Attr->getInterrupt()) {
    case ARMInterruptAttr::Generic: Kind = ""; break;
    case ARMInterruptAttr::IRQ:     Kind = "IRQ"; break;
    case ARMInterruptAttr::FIQ:     Kind = "FIQ"; break;
    case ARMInterruptAttr::SWI:     Kind = "SWI"; break;
    case ARMInterruptAttr::ABORT:   Kind = "ABORT"; break;
    case ARMInterruptAttr::UNDEF:   Kind = "UNDEF"; break;
    }

    Fn->addFnAttr("interrupt", Kind);

    ARMABIInfo::ABIKind ABI = cast<ARMABIInfo>(getABIInfo()).getABIKind();
    if (ABI == ARMABIInfo::APCS)
      return;

    // AAPCS guarantees that sp will be 8-byte aligned on any public interface,
    // however this is not necessarily true on taking any interrupt.
    // Instruct the backend to perform a realignment as part of the function
    // prologue.
    llvm::AttrBuilder B(Fn->getContext());
    B.addStackAlignmentAttr(8);
    Fn->addFnAttrs(B);
  }
};

class WindowsARMTargetCodeGenInfo : public ARMTargetCodeGenInfo {
public:
  WindowsARMTargetCodeGenInfo(CodeGenTypes &CGT, ARMABIInfo::ABIKind K)
      : ARMTargetCodeGenInfo(CGT, K) {}

  void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
                           CodeGen::CodeGenModule &CGM) const override;

  void getDependentLibraryOption(llvm::StringRef Lib,
                                 llvm::SmallString<24> &Opt) const override {
    Opt = "/DEFAULTLIB:" + qualifyWindowsLibrary(Lib);
  }

  void getDetectMismatchOption(llvm::StringRef Name, llvm::StringRef Value,
                               llvm::SmallString<32> &Opt) const override {
    Opt = "/FAILIFMISMATCH:\"" + Name.str() + "=" + Value.str() + "\"";
  }
};

void WindowsARMTargetCodeGenInfo::setTargetAttributes(
    const Decl *D, llvm::GlobalValue *GV, CodeGen::CodeGenModule &CGM) const {
  ARMTargetCodeGenInfo::setTargetAttributes(D, GV, CGM);
  if (GV->isDeclaration())
    return;
  addStackProbeTargetAttributes(D, GV, CGM);
}
} // namespace

void ARMABIInfo::computeInfo(CGFunctionInfo &FI) const {
  if (!::classifyReturnType(getCXXABI(), FI, *this))
    FI.getReturnInfo() = classifyReturnType(FI.getReturnType(), FI.isVariadic(),
                                            FI.getCallingConvention());

  for (auto &I : FI.arguments())
    I.info = classifyArgumentType(I.type, FI.isVariadic(),
                                  FI.getCallingConvention());

  // Always honor user-specified calling convention.
  if (FI.getCallingConvention() != llvm::CallingConv::C)
    return;

  llvm::CallingConv::ID cc = getRuntimeCC();
  if (cc != llvm::CallingConv::C)
    FI.setEffectiveCallingConvention(cc);
}

/// Return the default calling convention that LLVM will use.
llvm::CallingConv::ID ARMABIInfo::getLLVMDefaultCC() const {
  // The default calling convention that LLVM will infer.
  if (isEABIHF() || getTarget().getTriple().isWatchABI())
    return llvm::CallingConv::ARM_AAPCS_VFP;
  else if (isEABI())
    return llvm::CallingConv::ARM_AAPCS;
  else
    return llvm::CallingConv::ARM_APCS;
}

/// Return the calling convention that our ABI would like us to use
/// as the C calling convention.
llvm::CallingConv::ID ARMABIInfo::getABIDefaultCC() const {
  switch (getABIKind()) {
  case APCS:        return llvm::CallingConv::ARM_APCS;
  case AAPCS:       return llvm::CallingConv::ARM_AAPCS;
  case AAPCS_VFP:   return llvm::CallingConv::ARM_AAPCS_VFP;
  case AAPCS16_VFP: return llvm::CallingConv::ARM_AAPCS_VFP;
  }
  llvm_unreachable("bad ABI kind");
}

void ARMABIInfo::setCCs() {
  assert(getRuntimeCC() == llvm::CallingConv::C);

  // Don't muddy up the IR with a ton of explicit annotations if
  // they'd just match what LLVM will infer from the triple.
  llvm::CallingConv::ID abiCC = getABIDefaultCC();
  if (abiCC != getLLVMDefaultCC())
    RuntimeCC = abiCC;
}

ABIArgInfo ARMABIInfo::coerceIllegalVector(QualType Ty) const {
  uint64_t Size = getContext().getTypeSize(Ty);
  if (Size <= 32) {
    llvm::Type *ResType = llvm::Type::getInt32Ty(getVMContext());
    return ABIArgInfo::getDirect(ResType);
  }
  if (Size == 64 || Size == 128) {
    auto *ResType = llvm::FixedVectorType::get(
        llvm::Type::getInt32Ty(getVMContext()), Size / 32);
    return ABIArgInfo::getDirect(ResType);
  }
  return getNaturalAlignIndirect(Ty, /*ByVal=*/false);
}

ABIArgInfo ARMABIInfo::classifyHomogeneousAggregate(QualType Ty,
                                                    const Type *Base,
                                                    uint64_t Members) const {
  assert(Base && "Base class should be set for homogeneous aggregate");
  // Base can be a floating-point or a vector.
  if (const VectorType *VT = Base->getAs<VectorType>()) {
    // FP16 vectors should be converted to integer vectors.
    if (!getTarget().hasLegalHalfType() && containsAnyFP16Vectors(Ty)) {
      uint64_t Size = getContext().getTypeSize(VT);
      auto *NewVecTy = llvm::FixedVectorType::get(
          llvm::Type::getInt32Ty(getVMContext()), Size / 32);
      llvm::Type *Ty = llvm::ArrayType::get(NewVecTy, Members);
      return ABIArgInfo::getDirect(Ty, 0, nullptr, false);
    }
  }
  unsigned Align = 0;
  if (getABIKind() == ARMABIInfo::AAPCS ||
      getABIKind() == ARMABIInfo::AAPCS_VFP) {
    // For alignment-adjusted HFAs, cap the argument alignment to 8; leave it
    // at the default otherwise.
    Align = getContext().getTypeUnadjustedAlignInChars(Ty).getQuantity();
    unsigned BaseAlign = getContext().getTypeAlignInChars(Base).getQuantity();
    Align = (Align > BaseAlign && Align >= 8) ? 8 : 0;
  }
  return ABIArgInfo::getDirect(nullptr, 0, nullptr, false, Align);
}

ABIArgInfo ARMABIInfo::classifyArgumentType(QualType Ty, bool isVariadic,
                                            unsigned functionCallConv) const {
  // 6.1.2.1 The following argument types are VFP CPRCs:
  //   A single-precision floating-point type (including promoted
  //   half-precision types); A double-precision floating-point type;
  //   A 64-bit or 128-bit containerized vector type; Homogeneous Aggregate
  //   with a Base Type of a single- or double-precision floating-point type,
  //   64-bit containerized vectors or 128-bit containerized vectors with one
  //   to four Elements.
  // Variadic functions should always marshal to the base standard.
  bool IsAAPCS_VFP =
      !isVariadic && isEffectivelyAAPCS_VFP(functionCallConv, /* AAPCS16 */ false);

  Ty = useFirstFieldIfTransparentUnion(Ty);

  // Handle illegal vector types here.
  if (isIllegalVectorType(Ty))
    return coerceIllegalVector(Ty);

  if (!isAggregateTypeForABI(Ty)) {
    // Treat an enum type as its underlying type.
    if (const EnumType *EnumTy = Ty->getAs<EnumType>())
      Ty = EnumTy->getDecl()->getIntegerType();

    if (const auto *EIT = Ty->getAs<BitIntType>())
      if (EIT->getNumBits() > 64)
        return getNaturalAlignIndirect(Ty, /*ByVal=*/true);

    return (isPromotableIntegerTypeForABI(Ty) ? ABIArgInfo::getExtend(Ty)
                                              : ABIArgInfo::getDirect());
  }

  if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, getCXXABI()))
    return getNaturalAlignIndirect(Ty, RAA == CGCXXABI::RAA_DirectInMemory);

  // Ignore empty records.
  if (isEmptyRecord(getContext(), Ty, true))
    return ABIArgInfo::getIgnore();

  if (IsAAPCS_VFP) {
    // Homogeneous Aggregates need to be expanded when we can fit the aggregate
    // into VFP registers.
    const Type *Base = nullptr;
    uint64_t Members = 0;
    if (isHomogeneousAggregate(Ty, Base, Members))
      return classifyHomogeneousAggregate(Ty, Base, Members);
  } else if (getABIKind() == ARMABIInfo::AAPCS16_VFP) {
    // WatchOS does have homogeneous aggregates. Note that we intentionally use
    // this convention even for a variadic function: the backend will use GPRs
    // if needed.
    const Type *Base = nullptr;
    uint64_t Members = 0;
    if (isHomogeneousAggregate(Ty, Base, Members)) {
      assert(Base && Members <= 4 && "unexpected homogeneous aggregate");
      llvm::Type *Ty =
          llvm::ArrayType::get(CGT.ConvertType(QualType(Base, 0)), Members);
      return ABIArgInfo::getDirect(Ty, 0, nullptr, false);
    }
  }

  if (getABIKind() == ARMABIInfo::AAPCS16_VFP &&
      getContext().getTypeSizeInChars(Ty) > CharUnits::fromQuantity(16)) {
    // WatchOS is adopting the 64-bit AAPCS rule on composite types: if they're
    // bigger than 128 bits, they get placed in space allocated by the caller,
    // and a pointer is passed.
    return ABIArgInfo::getIndirect(
        CharUnits::fromQuantity(getContext().getTypeAlign(Ty) / 8), false);
  }

  // Support byval for ARM.
  // The ABI alignment for APCS is 4 bytes and for AAPCS at least 4 bytes and
  // at most 8 bytes. We realign the indirect argument if the type alignment
  // is bigger than the ABI alignment.
  uint64_t ABIAlign = 4;
  uint64_t TyAlign;
  if (getABIKind() == ARMABIInfo::AAPCS_VFP ||
      getABIKind() == ARMABIInfo::AAPCS) {
    TyAlign = getContext().getTypeUnadjustedAlignInChars(Ty).getQuantity();
    ABIAlign = std::min(std::max(TyAlign, (uint64_t)4), (uint64_t)8);
  } else {
    TyAlign = getContext().getTypeAlignInChars(Ty).getQuantity();
  }
  if (getContext().getTypeSizeInChars(Ty) > CharUnits::fromQuantity(64)) {
    assert(getABIKind() != ARMABIInfo::AAPCS16_VFP && "unexpected byval");
    return ABIArgInfo::getIndirect(CharUnits::fromQuantity(ABIAlign),
                                   /*ByVal=*/true,
                                   /*Realign=*/TyAlign > ABIAlign);
  }

  // On RenderScript, coerce aggregates <= 64 bytes to an integer array of the
  // same size and alignment.
  if (getTarget().isRenderScriptTarget())
    return coerceToIntArray(Ty, getContext(), getVMContext());

  // Otherwise, pass by coercing to a structure of the appropriate size.
  llvm::Type *ElemTy;
  unsigned SizeRegs;
  // FIXME: Try to match the types of the arguments more accurately where
  // we can.
  if (TyAlign <= 4) {
    ElemTy = llvm::Type::getInt32Ty(getVMContext());
    SizeRegs = (getContext().getTypeSize(Ty) + 31) / 32;
  } else {
    ElemTy = llvm::Type::getInt64Ty(getVMContext());
    SizeRegs = (getContext().getTypeSize(Ty) + 63) / 64;
  }

  return ABIArgInfo::getDirect(llvm::ArrayType::get(ElemTy, SizeRegs));
}

static bool isIntegerLikeType(QualType Ty, ASTContext &Context,
                              llvm::LLVMContext &VMContext) {
  // APCS, C Language Calling Conventions, Non-Simple Return Values: A
  // structure is called integer-like if its size is less than or equal to one
  // word, and the offset of each of its addressable sub-fields is zero.
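  //
  // A sketch of the rule as implemented below (illustrative, not exhaustive):
  //   struct A { int x; };          // integer-like: one word, field at offset 0
  //   union  B { int i; char c; };  // integer-like: every member at offset 0
  //   struct C { short a, b; };     // not integer-like: 'b' is at offset 16 bits
  //   struct D { float f; };        // not integer-like: floats are excluded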

  uint64_t Size = Context.getTypeSize(Ty);

  // Check that the type fits in a word.
  if (Size > 32)
    return false;

  // FIXME: Handle vector types!
  if (Ty->isVectorType())
    return false;

  // Float types are never treated as "integer like".
  if (Ty->isRealFloatingType())
    return false;

  // If this is a builtin or pointer type then it is ok.
  if (Ty->getAs<BuiltinType>() || Ty->isPointerType())
    return true;

  // Small complex integer types are "integer like".
  if (const ComplexType *CT = Ty->getAs<ComplexType>())
    return isIntegerLikeType(CT->getElementType(), Context, VMContext);

  // Single element and zero sized arrays should be allowed, by the definition
  // above, but they are not.

  // Otherwise, it must be a record type.
  const RecordType *RT = Ty->getAs<RecordType>();
  if (!RT) return false;

  // Ignore records with flexible arrays.
  const RecordDecl *RD = RT->getDecl();
  if (RD->hasFlexibleArrayMember())
    return false;

  // Check that all sub-fields are at offset 0, and are themselves "integer
  // like".
  const ASTRecordLayout &Layout = Context.getASTRecordLayout(RD);

  bool HadField = false;
  unsigned idx = 0;
  for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
       i != e; ++i, ++idx) {
    const FieldDecl *FD = *i;

    // Bit-fields are not addressable, so we only need to verify they are
    // "integer like". We still have to disallow a subsequent non-bitfield;
    // for example:
    //   struct { int : 0; int x; }
    // is non-integer like according to gcc.
    if (FD->isBitField()) {
      if (!RD->isUnion())
        HadField = true;

      if (!isIntegerLikeType(FD->getType(), Context, VMContext))
        return false;

      continue;
    }

    // Check if this field is at offset 0.
    if (Layout.getFieldOffset(idx) != 0)
      return false;

    if (!isIntegerLikeType(FD->getType(), Context, VMContext))
      return false;

    // Only allow at most one field in a structure. This doesn't match the
    // wording above, but follows gcc in situations with a field following an
    // empty structure.
    if (!RD->isUnion()) {
      if (HadField)
        return false;

      HadField = true;
    }
  }

  return true;
}

ABIArgInfo ARMABIInfo::classifyReturnType(QualType RetTy, bool isVariadic,
                                          unsigned functionCallConv) const {

  // Variadic functions should always marshal to the base standard.
  bool IsAAPCS_VFP =
      !isVariadic && isEffectivelyAAPCS_VFP(functionCallConv, /* AAPCS16 */ true);

  if (RetTy->isVoidType())
    return ABIArgInfo::getIgnore();

  if (const VectorType *VT = RetTy->getAs<VectorType>()) {
    // Large vector types should be returned via memory.
    if (getContext().getTypeSize(RetTy) > 128)
      return getNaturalAlignIndirect(RetTy);
    // TODO: FP16/BF16 vectors should be converted to integer vectors.
    // This check is similar to isIllegalVectorType - refactor?
    if ((!getTarget().hasLegalHalfType() &&
         (VT->getElementType()->isFloat16Type() ||
          VT->getElementType()->isHalfType())) ||
        (IsFloatABISoftFP &&
         VT->getElementType()->isBFloat16Type()))
      return coerceIllegalVector(RetTy);
  }

  if (!isAggregateTypeForABI(RetTy)) {
    // Treat an enum type as its underlying type.
    if (const EnumType *EnumTy = RetTy->getAs<EnumType>())
      RetTy = EnumTy->getDecl()->getIntegerType();

    if (const auto *EIT = RetTy->getAs<BitIntType>())
      if (EIT->getNumBits() > 64)
        return getNaturalAlignIndirect(RetTy, /*ByVal=*/false);

    return isPromotableIntegerTypeForABI(RetTy) ? ABIArgInfo::getExtend(RetTy)
                                                : ABIArgInfo::getDirect();
  }

  // Are we following APCS?
  if (getABIKind() == APCS) {
    if (isEmptyRecord(getContext(), RetTy, false))
      return ABIArgInfo::getIgnore();

    // Complex types are all returned as packed integers.
    //
    // FIXME: Consider using 2 x vector types if the back end handles them
    // correctly.
    if (RetTy->isAnyComplexType())
      return ABIArgInfo::getDirect(llvm::IntegerType::get(
          getVMContext(), getContext().getTypeSize(RetTy)));

    // Integer-like structures are returned in r0.
    if (isIntegerLikeType(RetTy, getContext(), getVMContext())) {
      // Return in the smallest viable integer type.
      uint64_t Size = getContext().getTypeSize(RetTy);
      if (Size <= 8)
        return ABIArgInfo::getDirect(llvm::Type::getInt8Ty(getVMContext()));
      if (Size <= 16)
        return ABIArgInfo::getDirect(llvm::Type::getInt16Ty(getVMContext()));
      return ABIArgInfo::getDirect(llvm::Type::getInt32Ty(getVMContext()));
    }

    // Otherwise return in memory.
    return getNaturalAlignIndirect(RetTy);
  }

  // Otherwise this is an AAPCS variant.

  if (isEmptyRecord(getContext(), RetTy, true))
    return ABIArgInfo::getIgnore();

  // Check for homogeneous aggregates with AAPCS-VFP.
  if (IsAAPCS_VFP) {
    const Type *Base = nullptr;
    uint64_t Members = 0;
    if (isHomogeneousAggregate(RetTy, Base, Members))
      return classifyHomogeneousAggregate(RetTy, Base, Members);
  }

  // Aggregates <= 4 bytes are returned in r0; other aggregates
  // are returned indirectly.
  uint64_t Size = getContext().getTypeSize(RetTy);
  if (Size <= 32) {
    // On RenderScript, coerce aggregates <= 4 bytes to an integer array of
    // the same size and alignment.
    if (getTarget().isRenderScriptTarget())
      return coerceToIntArray(RetTy, getContext(), getVMContext());

    if (getDataLayout().isBigEndian())
      // Return in a 32-bit integer type (as if loaded by LDR, AAPCS 5.4).
      return ABIArgInfo::getDirect(llvm::Type::getInt32Ty(getVMContext()));

    // Return in the smallest viable integer type.
    if (Size <= 8)
      return ABIArgInfo::getDirect(llvm::Type::getInt8Ty(getVMContext()));
    if (Size <= 16)
      return ABIArgInfo::getDirect(llvm::Type::getInt16Ty(getVMContext()));
    return ABIArgInfo::getDirect(llvm::Type::getInt32Ty(getVMContext()));
  } else if (Size <= 128 && getABIKind() == AAPCS16_VFP) {
    llvm::Type *Int32Ty = llvm::Type::getInt32Ty(getVMContext());
    llvm::Type *CoerceTy =
        llvm::ArrayType::get(Int32Ty, llvm::alignTo(Size, 32) / 32);
    return ABIArgInfo::getDirect(CoerceTy);
  }

  return getNaturalAlignIndirect(RetTy);
}

/// isIllegalVectorType - check whether Ty is an illegal vector type.
bool ARMABIInfo::isIllegalVectorType(QualType Ty) const {
  if (const VectorType *VT = Ty->getAs<VectorType>()) {
    // On targets that don't support half, fp16 or bfloat, they are expanded
    // into float, and we don't want the ABI to depend on whether or not they
    // are supported in hardware.
    // Thus return false to coerce vectors of these types into integer vectors.
    // We do not depend on hasLegalHalfType for bfloat as it is a separate IR
    // type.
    if ((!getTarget().hasLegalHalfType() &&
         (VT->getElementType()->isFloat16Type() ||
          VT->getElementType()->isHalfType())) ||
        (IsFloatABISoftFP &&
         VT->getElementType()->isBFloat16Type()))
      return true;
    if (isAndroid()) {
      // Android shipped using Clang 3.1, which supported a slightly different
      // vector ABI. The primary differences were that 3-element vector types
      // were legal, and so were sub-32-bit vectors (i.e. <2 x i8>). This path
      // accepts that legacy behavior for Android only.
      // Check whether VT is legal.
      unsigned NumElements = VT->getNumElements();
      // NumElements should be a power of 2 or equal to 3.
      if (!llvm::isPowerOf2_32(NumElements) && NumElements != 3)
        return true;
    } else {
      // Check whether VT is legal.
      unsigned NumElements = VT->getNumElements();
      uint64_t Size = getContext().getTypeSize(VT);
      // NumElements should be a power of 2.
      if (!llvm::isPowerOf2_32(NumElements))
        return true;
      // Size should be greater than 32 bits.
      return Size <= 32;
    }
  }
  return false;
}

/// Return true if a type contains any 16-bit floating-point vectors.
bool ARMABIInfo::containsAnyFP16Vectors(QualType Ty) const {
  if (const ConstantArrayType *AT = getContext().getAsConstantArrayType(Ty)) {
    uint64_t NElements = AT->getSize().getZExtValue();
    if (NElements == 0)
      return false;
    return containsAnyFP16Vectors(AT->getElementType());
  } else if (const RecordType *RT = Ty->getAs<RecordType>()) {
    const RecordDecl *RD = RT->getDecl();

    // If this is a C++ record, check the bases first.
    if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD))
      if (llvm::any_of(CXXRD->bases(), [this](const CXXBaseSpecifier &B) {
            return containsAnyFP16Vectors(B.getType());
          }))
        return true;

    if (llvm::any_of(RD->fields(), [this](FieldDecl *FD) {
          return FD && containsAnyFP16Vectors(FD->getType());
        }))
      return true;

    return false;
  } else {
    if (const VectorType *VT = Ty->getAs<VectorType>())
      return (VT->getElementType()->isFloat16Type() ||
              VT->getElementType()->isBFloat16Type() ||
              VT->getElementType()->isHalfType());
    return false;
  }
}

bool ARMABIInfo::isLegalVectorTypeForSwift(CharUnits vectorSize,
                                           llvm::Type *eltTy,
                                           unsigned numElts) const {
  if (!llvm::isPowerOf2_32(numElts))
    return false;
  unsigned size = getDataLayout().getTypeStoreSizeInBits(eltTy);
  if (size > 64)
    return false;
  if (vectorSize.getQuantity() != 8 &&
      (vectorSize.getQuantity() != 16 || numElts == 1))
    return false;
  return true;
}

bool ARMABIInfo::isHomogeneousAggregateBaseType(QualType Ty) const {
  // Homogeneous aggregates for AAPCS-VFP must have base types of float,
  // double, or 64-bit or 128-bit vectors.
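  //
  // For example (illustrative):
  //   struct HFA1 { float x, y, z; };      // HFA: Base = float, Members = 3
  //   struct HFA2 { double d[4]; };        // HFA: Base = double, Members = 4
  //   struct Mixed { float f; double d; }; // not an HFA: two distinct base types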
  if (const BuiltinType *BT = Ty->getAs<BuiltinType>()) {
    if (BT->getKind() == BuiltinType::Float ||
        BT->getKind() == BuiltinType::Double ||
        BT->getKind() == BuiltinType::LongDouble)
      return true;
  } else if (const VectorType *VT = Ty->getAs<VectorType>()) {
    unsigned VecSize = getContext().getTypeSize(VT);
    if (VecSize == 64 || VecSize == 128)
      return true;
  }
  return false;
}

bool ARMABIInfo::isHomogeneousAggregateSmallEnough(const Type *Base,
                                                   uint64_t Members) const {
  return Members <= 4;
}

bool ARMABIInfo::isZeroLengthBitfieldPermittedInHomogeneousAggregate() const {
  // AAPCS32 says that the rule for whether something is a homogeneous
  // aggregate is applied to the output of the data layout decision. So
  // anything that doesn't affect the data layout also does not affect
  // homogeneity. In particular, zero-length bitfields don't stop a struct
  // being homogeneous.
  return true;
}

bool ARMABIInfo::isEffectivelyAAPCS_VFP(unsigned callConvention,
                                        bool acceptHalf) const {
  // Give precedence to user-specified calling conventions.
  if (callConvention != llvm::CallingConv::C)
    return (callConvention == llvm::CallingConv::ARM_AAPCS_VFP);
  else
    return (getABIKind() == AAPCS_VFP) ||
           (acceptHalf && (getABIKind() == AAPCS16_VFP));
}

Address ARMABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
                              QualType Ty) const {
  CharUnits SlotSize = CharUnits::fromQuantity(4);

  // Empty records are ignored for parameter passing purposes.
  if (isEmptyRecord(getContext(), Ty, true)) {
    Address Addr = Address(CGF.Builder.CreateLoad(VAListAddr),
                           getVAListElementType(CGF), SlotSize);
    Addr = CGF.Builder.CreateElementBitCast(Addr, CGF.ConvertTypeForMem(Ty));
    return Addr;
  }

  CharUnits TySize = getContext().getTypeSizeInChars(Ty);
  CharUnits TyAlignForABI = getContext().getTypeUnadjustedAlignInChars(Ty);

  // Use indirect if the size of the illegal vector is bigger than 16 bytes.
  bool IsIndirect = false;
  const Type *Base = nullptr;
  uint64_t Members = 0;
  if (TySize > CharUnits::fromQuantity(16) && isIllegalVectorType(Ty)) {
    IsIndirect = true;

    // ARMv7k passes structs bigger than 16 bytes indirectly, in space
    // allocated by the caller.
  } else if (TySize > CharUnits::fromQuantity(16) &&
             getABIKind() == ARMABIInfo::AAPCS16_VFP &&
             !isHomogeneousAggregate(Ty, Base, Members)) {
    IsIndirect = true;

    // Otherwise, bound the type's ABI alignment.
    // The ABI alignment for 64-bit or 128-bit vectors is 8 for AAPCS and 4
    // for APCS. For AAPCS, the ABI alignment is at least 4 bytes and at most
    // 8 bytes. Our callers should be prepared to handle an under-aligned
    // address.
  } else if (getABIKind() == ARMABIInfo::AAPCS_VFP ||
             getABIKind() == ARMABIInfo::AAPCS) {
    TyAlignForABI = std::max(TyAlignForABI, CharUnits::fromQuantity(4));
    TyAlignForABI = std::min(TyAlignForABI, CharUnits::fromQuantity(8));
  } else if (getABIKind() == ARMABIInfo::AAPCS16_VFP) {
    // ARMv7k allows type alignment up to 16 bytes.
    TyAlignForABI = std::max(TyAlignForABI, CharUnits::fromQuantity(4));
    TyAlignForABI = std::min(TyAlignForABI, CharUnits::fromQuantity(16));
  } else {
    TyAlignForABI = CharUnits::fromQuantity(4);
  }

  TypeInfoChars TyInfo(TySize, TyAlignForABI, AlignRequirementKind::None);
  return emitVoidPtrVAArg(CGF, VAListAddr, Ty, IsIndirect, TyInfo,
                          SlotSize, /*AllowHigherAlign*/ true);
}

//===----------------------------------------------------------------------===//
// NVPTX ABI Implementation
//===----------------------------------------------------------------------===//

namespace {

class NVPTXTargetCodeGenInfo;

class NVPTXABIInfo : public ABIInfo {
  NVPTXTargetCodeGenInfo &CGInfo;

public:
  NVPTXABIInfo(CodeGenTypes &CGT, NVPTXTargetCodeGenInfo &Info)
      : ABIInfo(CGT), CGInfo(Info) {}

  ABIArgInfo classifyReturnType(QualType RetTy) const;
  ABIArgInfo classifyArgumentType(QualType Ty) const;

  void computeInfo(CGFunctionInfo &FI) const override;
  Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
                    QualType Ty) const override;
  bool isUnsupportedType(QualType T) const;
  ABIArgInfo coerceToIntArrayWithLimit(QualType Ty, unsigned MaxSize) const;
};

class NVPTXTargetCodeGenInfo : public TargetCodeGenInfo {
public:
  NVPTXTargetCodeGenInfo(CodeGenTypes &CGT)
      : TargetCodeGenInfo(std::make_unique<NVPTXABIInfo>(CGT, *this)) {}

  void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
                           CodeGen::CodeGenModule &M) const override;
  bool shouldEmitStaticExternCAliases() const override;

  llvm::Type *getCUDADeviceBuiltinSurfaceDeviceType() const override {
    // On the device side, a surface reference is represented as an object
    // handle in a 64-bit integer.
    return llvm::Type::getInt64Ty(getABIInfo().getVMContext());
  }

  llvm::Type *getCUDADeviceBuiltinTextureDeviceType() const override {
    // On the device side, a texture reference is represented as an object
    // handle in a 64-bit integer.
    return llvm::Type::getInt64Ty(getABIInfo().getVMContext());
  }

  bool emitCUDADeviceBuiltinSurfaceDeviceCopy(CodeGenFunction &CGF, LValue Dst,
                                              LValue Src) const override {
    emitBuiltinSurfTexDeviceCopy(CGF, Dst, Src);
    return true;
  }

  bool emitCUDADeviceBuiltinTextureDeviceCopy(CodeGenFunction &CGF, LValue Dst,
                                              LValue Src) const override {
    emitBuiltinSurfTexDeviceCopy(CGF, Dst, Src);
    return true;
  }

private:
  // Adds a NamedMDNode with GV, Name, and Operand as operands, and adds the
  // resulting MDNode to the nvvm.annotations MDNode.
  static void addNVVMMetadata(llvm::GlobalValue *GV, StringRef Name,
                              int Operand);

  static void emitBuiltinSurfTexDeviceCopy(CodeGenFunction &CGF, LValue Dst,
                                           LValue Src) {
    llvm::Value *Handle = nullptr;
    llvm::Constant *C =
        llvm::dyn_cast<llvm::Constant>(Src.getAddress(CGF).getPointer());
    // Look up `addrspacecast` through the constant pointer if any.
    if (auto *ASC = llvm::dyn_cast_or_null<llvm::AddrSpaceCastOperator>(C))
      C = llvm::cast<llvm::Constant>(ASC->getPointerOperand());
    if (auto *GV = llvm::dyn_cast_or_null<llvm::GlobalVariable>(C)) {
      // Load the handle from the specific global variable using the
      // `nvvm.texsurf.handle.internal` intrinsic.
      Handle = CGF.EmitRuntimeCall(
          CGF.CGM.getIntrinsic(llvm::Intrinsic::nvvm_texsurf_handle_internal,
                               {GV->getType()}),
          {GV}, "texsurf_handle");
    } else
      Handle = CGF.EmitLoadOfScalar(Src, SourceLocation());
    CGF.EmitStoreOfScalar(Handle, Dst);
  }
};

/// Checks if the type is unsupported directly by the current target.
bool NVPTXABIInfo::isUnsupportedType(QualType T) const {
  ASTContext &Context = getContext();
  if (!Context.getTargetInfo().hasFloat16Type() && T->isFloat16Type())
    return true;
  if (!Context.getTargetInfo().hasFloat128Type() &&
      (T->isFloat128Type() ||
       (T->isRealFloatingType() && Context.getTypeSize(T) == 128)))
    return true;
  if (const auto *EIT = T->getAs<BitIntType>())
    return EIT->getNumBits() >
           (Context.getTargetInfo().hasInt128Type() ? 128U : 64U);
  if (!Context.getTargetInfo().hasInt128Type() && T->isIntegerType() &&
      Context.getTypeSize(T) > 64U)
    return true;
  if (const auto *AT = T->getAsArrayTypeUnsafe())
    return isUnsupportedType(AT->getElementType());
  const auto *RT = T->getAs<RecordType>();
  if (!RT)
    return false;
  const RecordDecl *RD = RT->getDecl();

  // If this is a C++ record, check the bases first.
  if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD))
    for (const CXXBaseSpecifier &I : CXXRD->bases())
      if (isUnsupportedType(I.getType()))
        return true;

  for (const FieldDecl *I : RD->fields())
    if (isUnsupportedType(I->getType()))
      return true;
  return false;
}

/// Coerce the given type into an array whose element size is capped at the
/// maximum allowed size.
ABIArgInfo NVPTXABIInfo::coerceToIntArrayWithLimit(QualType Ty,
                                                   unsigned MaxSize) const {
  // Alignment and Size are measured in bits.
  const uint64_t Size = getContext().getTypeSize(Ty);
  const uint64_t Alignment = getContext().getTypeAlign(Ty);
  const unsigned Div = std::min<unsigned>(MaxSize, Alignment);
  llvm::Type *IntType = llvm::Type::getIntNTy(getVMContext(), Div);
  const uint64_t NumElements = (Size + Div - 1) / Div;
  return ABIArgInfo::getDirect(llvm::ArrayType::get(IntType, NumElements));
}

ABIArgInfo NVPTXABIInfo::classifyReturnType(QualType RetTy) const {
  if (RetTy->isVoidType())
    return ABIArgInfo::getIgnore();

  if (getContext().getLangOpts().OpenMP &&
      getContext().getLangOpts().OpenMPIsDevice && isUnsupportedType(RetTy))
    return coerceToIntArrayWithLimit(RetTy, 64);

  // Note: this is different from the default ABI.
  if (!RetTy->isScalarType())
    return ABIArgInfo::getDirect();

  // Treat an enum type as its underlying type.
  if (const EnumType *EnumTy = RetTy->getAs<EnumType>())
    RetTy = EnumTy->getDecl()->getIntegerType();

  return (isPromotableIntegerTypeForABI(RetTy) ? ABIArgInfo::getExtend(RetTy)
                                               : ABIArgInfo::getDirect());
}

ABIArgInfo NVPTXABIInfo::classifyArgumentType(QualType Ty) const {
  // Treat an enum type as its underlying type.
  if (const EnumType *EnumTy = Ty->getAs<EnumType>())
    Ty = EnumTy->getDecl()->getIntegerType();

  // Return aggregate types as indirect by value.
  if (isAggregateTypeForABI(Ty)) {
    // Under CUDA device compilation, tex/surf builtin types are replaced with
    // object types and passed directly.
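    //
    // Illustrative: a kernel parameter of the CUDA builtin texture type is
    // lowered to the i64 handle produced by
    // getCUDADeviceBuiltinTextureDeviceType() rather than being passed byval.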
    if (getContext().getLangOpts().CUDAIsDevice) {
      if (Ty->isCUDADeviceBuiltinSurfaceType())
        return ABIArgInfo::getDirect(
            CGInfo.getCUDADeviceBuiltinSurfaceDeviceType());
      if (Ty->isCUDADeviceBuiltinTextureType())
        return ABIArgInfo::getDirect(
            CGInfo.getCUDADeviceBuiltinTextureDeviceType());
    }
    return getNaturalAlignIndirect(Ty, /* byval */ true);
  }

  if (const auto *EIT = Ty->getAs<BitIntType>()) {
    if ((EIT->getNumBits() > 128) ||
        (!getContext().getTargetInfo().hasInt128Type() &&
         EIT->getNumBits() > 64))
      return getNaturalAlignIndirect(Ty, /* byval */ true);
  }

  return (isPromotableIntegerTypeForABI(Ty) ? ABIArgInfo::getExtend(Ty)
                                            : ABIArgInfo::getDirect());
}

void NVPTXABIInfo::computeInfo(CGFunctionInfo &FI) const {
  if (!getCXXABI().classifyReturnType(FI))
    FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
  for (auto &I : FI.arguments())
    I.info = classifyArgumentType(I.type);

  // Always honor user-specified calling convention.
  if (FI.getCallingConvention() != llvm::CallingConv::C)
    return;

  FI.setEffectiveCallingConvention(getRuntimeCC());
}

Address NVPTXABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
                                QualType Ty) const {
  llvm_unreachable("NVPTX does not support varargs");
}

void NVPTXTargetCodeGenInfo::setTargetAttributes(
    const Decl *D, llvm::GlobalValue *GV, CodeGen::CodeGenModule &M) const {
  if (GV->isDeclaration())
    return;
  const VarDecl *VD = dyn_cast_or_null<VarDecl>(D);
  if (VD) {
    if (M.getLangOpts().CUDA) {
      if (VD->getType()->isCUDADeviceBuiltinSurfaceType())
        addNVVMMetadata(GV, "surface", 1);
      else if (VD->getType()->isCUDADeviceBuiltinTextureType())
        addNVVMMetadata(GV, "texture", 1);
      return;
    }
  }

  const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D);
  if (!FD) return;

  llvm::Function *F = cast<llvm::Function>(GV);

  // Perform special handling in OpenCL mode.
  if (M.getLangOpts().OpenCL) {
    // Use OpenCL function attributes to check for kernel functions.
    // By default, all functions are device functions.
    if (FD->hasAttr<OpenCLKernelAttr>()) {
      // OpenCL __kernel functions get kernel metadata.
      // Create !{<func-ref>, metadata !"kernel", i32 1} node.
      addNVVMMetadata(F, "kernel", 1);
      // And kernel functions are not subject to inlining.
      F->addFnAttr(llvm::Attribute::NoInline);
    }
  }

  // Perform special handling in CUDA mode.
  if (M.getLangOpts().CUDA) {
    // CUDA __global__ functions get a kernel metadata entry. Since
    // __global__ functions cannot be called from the device, we do not
    // need to set the noinline attribute.
    if (FD->hasAttr<CUDAGlobalAttr>()) {
      // Create !{<func-ref>, metadata !"kernel", i32 1} node.
      addNVVMMetadata(F, "kernel", 1);
    }
    if (CUDALaunchBoundsAttr *Attr = FD->getAttr<CUDALaunchBoundsAttr>()) {
      // Create !{<func-ref>, metadata !"maxntidx", i32 <val>} node.
      llvm::APSInt MaxThreads(32);
      MaxThreads = Attr->getMaxThreads()->EvaluateKnownConstInt(M.getContext());
      if (MaxThreads > 0)
        addNVVMMetadata(F, "maxntidx", MaxThreads.getExtValue());

      // min blocks is an optional argument for CUDALaunchBoundsAttr.
      // If it was not specified in __launch_bounds__ or if the user specified
      // a 0 value, we don't have to add a PTX directive.
      if (Attr->getMinBlocks()) {
        llvm::APSInt MinBlocks(32);
        MinBlocks = Attr->getMinBlocks()->EvaluateKnownConstInt(M.getContext());
        if (MinBlocks > 0)
          // Create !{<func-ref>, metadata !"minctasm", i32 <val>} node.
          addNVVMMetadata(F, "minctasm", MinBlocks.getExtValue());
      }
    }
  }
}

void NVPTXTargetCodeGenInfo::addNVVMMetadata(llvm::GlobalValue *GV,
                                             StringRef Name, int Operand) {
  llvm::Module *M = GV->getParent();
  llvm::LLVMContext &Ctx = M->getContext();

  // Get the "nvvm.annotations" metadata node.
  llvm::NamedMDNode *MD = M->getOrInsertNamedMetadata("nvvm.annotations");

  llvm::Metadata *MDVals[] = {
      llvm::ConstantAsMetadata::get(GV), llvm::MDString::get(Ctx, Name),
      llvm::ConstantAsMetadata::get(
          llvm::ConstantInt::get(llvm::Type::getInt32Ty(Ctx), Operand))};
  // Append metadata to nvvm.annotations.
  MD->addOperand(llvm::MDNode::get(Ctx, MDVals));
}

bool NVPTXTargetCodeGenInfo::shouldEmitStaticExternCAliases() const {
  return false;
}
} // namespace

//===----------------------------------------------------------------------===//
// SystemZ ABI Implementation
//===----------------------------------------------------------------------===//

namespace {

class SystemZABIInfo : public SwiftABIInfo {
  bool HasVector;
  bool IsSoftFloatABI;

public:
  SystemZABIInfo(CodeGenTypes &CGT, bool HV, bool SF)
      : SwiftABIInfo(CGT), HasVector(HV), IsSoftFloatABI(SF) {}

  bool isPromotableIntegerTypeForABI(QualType Ty) const;
  bool isCompoundType(QualType Ty) const;
  bool isVectorArgumentType(QualType Ty) const;
  bool isFPArgumentType(QualType Ty) const;
  QualType GetSingleElementType(QualType Ty) const;

  ABIArgInfo classifyReturnType(QualType RetTy) const;
  ABIArgInfo classifyArgumentType(QualType ArgTy) const;

  void computeInfo(CGFunctionInfo &FI) const override {
    if (!getCXXABI().classifyReturnType(FI))
      FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
    for (auto &I : FI.arguments())
      I.info = classifyArgumentType(I.type);
  }

  Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
                    QualType Ty) const override;

  bool shouldPassIndirectlyForSwift(ArrayRef<llvm::Type *> scalars,
                                    bool asReturnValue) const override {
    return occupiesMoreThan(CGT, scalars, /*total*/ 4);
  }
  bool isSwiftErrorInRegister() const override {
    return false;
  }
};

class SystemZTargetCodeGenInfo : public TargetCodeGenInfo {
public:
  SystemZTargetCodeGenInfo(CodeGenTypes &CGT, bool HasVector, bool SoftFloatABI)
      : TargetCodeGenInfo(
            std::make_unique<SystemZABIInfo>(CGT, HasVector, SoftFloatABI)) {}

  llvm::Value *testFPKind(llvm::Value *V, unsigned BuiltinID,
                          CGBuilderTy &Builder,
                          CodeGenModule &CGM) const override {
    assert(V->getType()->isFloatingPointTy() && "V should have an FP type.");
    // Only use TDC in constrained FP mode.
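    //
    // Illustrative: in a strict-FP region, __builtin_isnan(x) becomes a call
    // to the s390.tdc intrinsic with mask 0xf (see the switch below) rather
    // than an ordinary floating-point comparison.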
    if (!Builder.getIsFPConstrained())
      return nullptr;

    llvm::Type *Ty = V->getType();
    if (Ty->isFloatTy() || Ty->isDoubleTy() || Ty->isFP128Ty()) {
      llvm::Module &M = CGM.getModule();
      auto &Ctx = M.getContext();
      llvm::Function *TDCFunc =
          llvm::Intrinsic::getDeclaration(&M, llvm::Intrinsic::s390_tdc, Ty);
      unsigned TDCBits = 0;
      switch (BuiltinID) {
      case Builtin::BI__builtin_isnan:
        TDCBits = 0xf;
        break;
      case Builtin::BIfinite:
      case Builtin::BI__finite:
      case Builtin::BIfinitef:
      case Builtin::BI__finitef:
      case Builtin::BIfinitel:
      case Builtin::BI__finitel:
      case Builtin::BI__builtin_isfinite:
        TDCBits = 0xfc0;
        break;
      case Builtin::BI__builtin_isinf:
        TDCBits = 0x30;
        break;
      default:
        break;
      }
      if (TDCBits)
        return Builder.CreateCall(
            TDCFunc,
            {V, llvm::ConstantInt::get(llvm::Type::getInt64Ty(Ctx), TDCBits)});
    }
    return nullptr;
  }
};
} // namespace

bool SystemZABIInfo::isPromotableIntegerTypeForABI(QualType Ty) const {
  // Treat an enum type as its underlying type.
  if (const EnumType *EnumTy = Ty->getAs<EnumType>())
    Ty = EnumTy->getDecl()->getIntegerType();

  // Promotable integer types are required to be promoted by the ABI.
  if (ABIInfo::isPromotableIntegerTypeForABI(Ty))
    return true;

  if (const auto *EIT = Ty->getAs<BitIntType>())
    if (EIT->getNumBits() < 64)
      return true;

  // 32-bit values must also be promoted.
  if (const BuiltinType *BT = Ty->getAs<BuiltinType>())
    switch (BT->getKind()) {
    case BuiltinType::Int:
    case BuiltinType::UInt:
      return true;
    default:
      return false;
    }
  return false;
}

bool SystemZABIInfo::isCompoundType(QualType Ty) const {
  return (Ty->isAnyComplexType() ||
          Ty->isVectorType() ||
          isAggregateTypeForABI(Ty));
}

bool SystemZABIInfo::isVectorArgumentType(QualType Ty) const {
  return (HasVector &&
          Ty->isVectorType() &&
          getContext().getTypeSize(Ty) <= 128);
}

bool SystemZABIInfo::isFPArgumentType(QualType Ty) const {
  if (IsSoftFloatABI)
    return false;

  if (const BuiltinType *BT = Ty->getAs<BuiltinType>())
    switch (BT->getKind()) {
    case BuiltinType::Float:
    case BuiltinType::Double:
      return true;
    default:
      return false;
    }

  return false;
}

QualType SystemZABIInfo::GetSingleElementType(QualType Ty) const {
  const RecordType *RT = Ty->getAs<RecordType>();

  if (RT && RT->isStructureOrClassType()) {
    const RecordDecl *RD = RT->getDecl();
    QualType Found;

    // If this is a C++ record, check the bases first.
    if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD))
      for (const auto &I : CXXRD->bases()) {
        QualType Base = I.getType();

        // Empty bases don't affect things either way.
        if (isEmptyRecord(getContext(), Base, true))
          continue;

        if (!Found.isNull())
          return Ty;
        Found = GetSingleElementType(Base);
      }

    // Check the fields.
    for (const auto *FD : RD->fields()) {
      // Unlike isSingleElementStruct(), empty structure and array fields
      // do count. So do anonymous bitfields that aren't zero-sized.

      // Like isSingleElementStruct(), ignore C++20 empty data members.
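      //
      // Illustrative: in "struct S { [[no_unique_address]] Empty e; float f; }",
      // 'e' is skipped here, so S is still treated as a single-element float
      // struct.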
      if (FD->hasAttr<NoUniqueAddressAttr>() &&
          isEmptyRecord(getContext(), FD->getType(), true))
        continue;

      // Unlike isSingleElementStruct(), arrays do not count.
      // Nested structures still do though.
      if (!Found.isNull())
        return Ty;
      Found = GetSingleElementType(FD->getType());
    }

    // Unlike isSingleElementStruct(), trailing padding is allowed.
    // An 8-byte aligned struct s { float f; } is passed as a double.
    if (!Found.isNull())
      return Found;
  }

  return Ty;
}

Address SystemZABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
                                  QualType Ty) const {
  // Assume that the va_list type is correct; it should be a pointer to this
  // LLVM type:
  // struct {
  //   i64 __gpr;
  //   i64 __fpr;
  //   i8 *__overflow_arg_area;
  //   i8 *__reg_save_area;
  // };

  // Every non-vector argument occupies 8 bytes and is passed by preference
  // in either GPRs or FPRs. Vector arguments occupy 8 or 16 bytes and are
  // always passed on the stack.
  Ty = getContext().getCanonicalType(Ty);
  auto TyInfo = getContext().getTypeInfoInChars(Ty);
  llvm::Type *ArgTy = CGF.ConvertTypeForMem(Ty);
  llvm::Type *DirectTy = ArgTy;
  ABIArgInfo AI = classifyArgumentType(Ty);
  bool IsIndirect = AI.isIndirect();
  bool InFPRs = false;
  bool IsVector = false;
  CharUnits UnpaddedSize;
  CharUnits DirectAlign;
  if (IsIndirect) {
    DirectTy = llvm::PointerType::getUnqual(DirectTy);
    UnpaddedSize = DirectAlign = CharUnits::fromQuantity(8);
  } else {
    if (AI.getCoerceToType())
      ArgTy = AI.getCoerceToType();
    InFPRs = (!IsSoftFloatABI && (ArgTy->isFloatTy() || ArgTy->isDoubleTy()));
    IsVector = ArgTy->isVectorTy();
    UnpaddedSize = TyInfo.Width;
    DirectAlign = TyInfo.Align;
  }
  CharUnits PaddedSize = CharUnits::fromQuantity(8);
  if (IsVector && UnpaddedSize > PaddedSize)
    PaddedSize = CharUnits::fromQuantity(16);
  assert((UnpaddedSize <= PaddedSize) && "Invalid argument size.");

  CharUnits Padding = (PaddedSize - UnpaddedSize);

  llvm::Type *IndexTy = CGF.Int64Ty;
  llvm::Value *PaddedSizeV =
      llvm::ConstantInt::get(IndexTy, PaddedSize.getQuantity());

  if (IsVector) {
    // Work out the address of a vector argument on the stack.
    // Vector arguments are always passed in the high bits of a
    // single (8-byte) or double (16-byte) stack slot.
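    //
    // Illustrative: on big-endian SystemZ the "high bits" are the
    // lowest-addressed bytes, so unlike the scalar in-memory case below, no
    // padding offset is applied before the load.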
    Address OverflowArgAreaPtr =
        CGF.Builder.CreateStructGEP(VAListAddr, 2, "overflow_arg_area_ptr");
    Address OverflowArgArea =
        Address(CGF.Builder.CreateLoad(OverflowArgAreaPtr, "overflow_arg_area"),
                CGF.Int8Ty, TyInfo.Align);
    Address MemAddr =
        CGF.Builder.CreateElementBitCast(OverflowArgArea, DirectTy, "mem_addr");

    // Update the overflow_arg_area pointer.
    llvm::Value *NewOverflowArgArea = CGF.Builder.CreateGEP(
        OverflowArgArea.getElementType(), OverflowArgArea.getPointer(),
        PaddedSizeV, "overflow_arg_area");
    CGF.Builder.CreateStore(NewOverflowArgArea, OverflowArgAreaPtr);

    return MemAddr;
  }

  assert(PaddedSize.getQuantity() == 8);

  unsigned MaxRegs, RegCountField, RegSaveIndex;
  CharUnits RegPadding;
  if (InFPRs) {
    MaxRegs = 4;              // Maximum of 4 FPR arguments
    RegCountField = 1;        // __fpr
    RegSaveIndex = 16;        // save offset for f0
    RegPadding = CharUnits(); // floats are passed in the high bits of an FPR
  } else {
    MaxRegs = 5;          // Maximum of 5 GPR arguments
    RegCountField = 0;    // __gpr
    RegSaveIndex = 2;     // save offset for r2
    RegPadding = Padding; // values are passed in the low bits of a GPR
  }

  Address RegCountPtr =
      CGF.Builder.CreateStructGEP(VAListAddr, RegCountField, "reg_count_ptr");
  llvm::Value *RegCount = CGF.Builder.CreateLoad(RegCountPtr, "reg_count");
  llvm::Value *MaxRegsV = llvm::ConstantInt::get(IndexTy, MaxRegs);
  llvm::Value *InRegs = CGF.Builder.CreateICmpULT(RegCount, MaxRegsV,
                                                  "fits_in_regs");

  llvm::BasicBlock *InRegBlock = CGF.createBasicBlock("vaarg.in_reg");
  llvm::BasicBlock *InMemBlock = CGF.createBasicBlock("vaarg.in_mem");
  llvm::BasicBlock *ContBlock = CGF.createBasicBlock("vaarg.end");
  CGF.Builder.CreateCondBr(InRegs, InRegBlock, InMemBlock);

  // Emit code to load the value if it was passed in registers.
  CGF.EmitBlock(InRegBlock);

  // Work out the address of an argument register.
  llvm::Value *ScaledRegCount =
      CGF.Builder.CreateMul(RegCount, PaddedSizeV, "scaled_reg_count");
  llvm::Value *RegBase =
      llvm::ConstantInt::get(IndexTy, RegSaveIndex * PaddedSize.getQuantity() +
                                          RegPadding.getQuantity());
  llvm::Value *RegOffset =
      CGF.Builder.CreateAdd(ScaledRegCount, RegBase, "reg_offset");
  Address RegSaveAreaPtr =
      CGF.Builder.CreateStructGEP(VAListAddr, 3, "reg_save_area_ptr");
  llvm::Value *RegSaveArea =
      CGF.Builder.CreateLoad(RegSaveAreaPtr, "reg_save_area");
  Address RawRegAddr(
      CGF.Builder.CreateGEP(CGF.Int8Ty, RegSaveArea, RegOffset, "raw_reg_addr"),
      CGF.Int8Ty, PaddedSize);
  Address RegAddr =
      CGF.Builder.CreateElementBitCast(RawRegAddr, DirectTy, "reg_addr");

  // Update the register count.
  llvm::Value *One = llvm::ConstantInt::get(IndexTy, 1);
  llvm::Value *NewRegCount =
      CGF.Builder.CreateAdd(RegCount, One, "reg_count");
  CGF.Builder.CreateStore(NewRegCount, RegCountPtr);
  CGF.EmitBranch(ContBlock);

  // Emit code to load the value if it was passed in memory.
  CGF.EmitBlock(InMemBlock);

  // Work out the address of a stack argument.
7695 Address OverflowArgAreaPtr = 7696 CGF.Builder.CreateStructGEP(VAListAddr, 2, "overflow_arg_area_ptr"); 7697 Address OverflowArgArea = 7698 Address(CGF.Builder.CreateLoad(OverflowArgAreaPtr, "overflow_arg_area"), 7699 CGF.Int8Ty, PaddedSize); 7700 Address RawMemAddr = 7701 CGF.Builder.CreateConstByteGEP(OverflowArgArea, Padding, "raw_mem_addr"); 7702 Address MemAddr = 7703 CGF.Builder.CreateElementBitCast(RawMemAddr, DirectTy, "mem_addr"); 7704 7705 // Update overflow_arg_area_ptr pointer 7706 llvm::Value *NewOverflowArgArea = 7707 CGF.Builder.CreateGEP(OverflowArgArea.getElementType(), 7708 OverflowArgArea.getPointer(), PaddedSizeV, 7709 "overflow_arg_area"); 7710 CGF.Builder.CreateStore(NewOverflowArgArea, OverflowArgAreaPtr); 7711 CGF.EmitBranch(ContBlock); 7712 7713 // Return the appropriate result. 7714 CGF.EmitBlock(ContBlock); 7715 Address ResAddr = emitMergePHI(CGF, RegAddr, InRegBlock, MemAddr, InMemBlock, 7716 "va_arg.addr"); 7717 7718 if (IsIndirect) 7719 ResAddr = Address(CGF.Builder.CreateLoad(ResAddr, "indirect_arg"), ArgTy, 7720 TyInfo.Align); 7721 7722 return ResAddr; 7723 } 7724 7725 ABIArgInfo SystemZABIInfo::classifyReturnType(QualType RetTy) const { 7726 if (RetTy->isVoidType()) 7727 return ABIArgInfo::getIgnore(); 7728 if (isVectorArgumentType(RetTy)) 7729 return ABIArgInfo::getDirect(); 7730 if (isCompoundType(RetTy) || getContext().getTypeSize(RetTy) > 64) 7731 return getNaturalAlignIndirect(RetTy); 7732 return (isPromotableIntegerTypeForABI(RetTy) ? ABIArgInfo::getExtend(RetTy) 7733 : ABIArgInfo::getDirect()); 7734 } 7735 7736 ABIArgInfo SystemZABIInfo::classifyArgumentType(QualType Ty) const { 7737 // Handle the generic C++ ABI. 7738 if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, getCXXABI())) 7739 return getNaturalAlignIndirect(Ty, RAA == CGCXXABI::RAA_DirectInMemory); 7740 7741 // Integers and enums are extended to full register width. 7742 if (isPromotableIntegerTypeForABI(Ty)) 7743 return ABIArgInfo::getExtend(Ty); 7744 7745 // Handle vector types and vector-like structure types. Note that 7746 // as opposed to float-like structure types, we do not allow any 7747 // padding for vector-like structures, so verify the sizes match. 7748 uint64_t Size = getContext().getTypeSize(Ty); 7749 QualType SingleElementTy = GetSingleElementType(Ty); 7750 if (isVectorArgumentType(SingleElementTy) && 7751 getContext().getTypeSize(SingleElementTy) == Size) 7752 return ABIArgInfo::getDirect(CGT.ConvertType(SingleElementTy)); 7753 7754 // Values that are not 1, 2, 4 or 8 bytes in size are passed indirectly. 7755 if (Size != 8 && Size != 16 && Size != 32 && Size != 64) 7756 return getNaturalAlignIndirect(Ty, /*ByVal=*/false); 7757 7758 // Handle small structures. 7759 if (const RecordType *RT = Ty->getAs<RecordType>()) { 7760 // Structures with flexible arrays have variable length, so really 7761 // fail the size test above. 7762 const RecordDecl *RD = RT->getDecl(); 7763 if (RD->hasFlexibleArrayMember()) 7764 return getNaturalAlignIndirect(Ty, /*ByVal=*/false); 7765 7766 // The structure is passed as an unextended integer, a float, or a double. 7767 llvm::Type *PassTy; 7768 if (isFPArgumentType(SingleElementTy)) { 7769 assert(Size == 32 || Size == 64); 7770 if (Size == 32) 7771 PassTy = llvm::Type::getFloatTy(getVMContext()); 7772 else 7773 PassTy = llvm::Type::getDoubleTy(getVMContext()); 7774 } else 7775 PassTy = llvm::IntegerType::get(getVMContext(), Size); 7776 return ABIArgInfo::getDirect(PassTy); 7777 } 7778 7779 // Non-structure compounds are passed indirectly. 
7780 if (isCompoundType(Ty)) 7781 return getNaturalAlignIndirect(Ty, /*ByVal=*/false); 7782 7783 return ABIArgInfo::getDirect(nullptr); 7784 } 7785 7786 //===----------------------------------------------------------------------===// 7787 // MSP430 ABI Implementation 7788 //===----------------------------------------------------------------------===// 7789 7790 namespace { 7791 7792 class MSP430ABIInfo : public DefaultABIInfo { 7793 static ABIArgInfo complexArgInfo() { 7794 ABIArgInfo Info = ABIArgInfo::getDirect(); 7795 Info.setCanBeFlattened(false); 7796 return Info; 7797 } 7798 7799 public: 7800 MSP430ABIInfo(CodeGenTypes &CGT) : DefaultABIInfo(CGT) {} 7801 7802 ABIArgInfo classifyReturnType(QualType RetTy) const { 7803 if (RetTy->isAnyComplexType()) 7804 return complexArgInfo(); 7805 7806 return DefaultABIInfo::classifyReturnType(RetTy); 7807 } 7808 7809 ABIArgInfo classifyArgumentType(QualType RetTy) const { 7810 if (RetTy->isAnyComplexType()) 7811 return complexArgInfo(); 7812 7813 return DefaultABIInfo::classifyArgumentType(RetTy); 7814 } 7815 7816 // Just copy the original implementations because 7817 // DefaultABIInfo::classify{Return,Argument}Type() are not virtual 7818 void computeInfo(CGFunctionInfo &FI) const override { 7819 if (!getCXXABI().classifyReturnType(FI)) 7820 FI.getReturnInfo() = classifyReturnType(FI.getReturnType()); 7821 for (auto &I : FI.arguments()) 7822 I.info = classifyArgumentType(I.type); 7823 } 7824 7825 Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr, 7826 QualType Ty) const override { 7827 return EmitVAArgInstr(CGF, VAListAddr, Ty, classifyArgumentType(Ty)); 7828 } 7829 }; 7830 7831 class MSP430TargetCodeGenInfo : public TargetCodeGenInfo { 7832 public: 7833 MSP430TargetCodeGenInfo(CodeGenTypes &CGT) 7834 : TargetCodeGenInfo(std::make_unique<MSP430ABIInfo>(CGT)) {} 7835 void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV, 7836 CodeGen::CodeGenModule &M) const override; 7837 }; 7838 7839 } 7840 7841 void MSP430TargetCodeGenInfo::setTargetAttributes( 7842 const Decl *D, llvm::GlobalValue *GV, CodeGen::CodeGenModule &M) const { 7843 if (GV->isDeclaration()) 7844 return; 7845 if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D)) { 7846 const auto *InterruptAttr = FD->getAttr<MSP430InterruptAttr>(); 7847 if (!InterruptAttr) 7848 return; 7849 7850 // Handle 'interrupt' attribute: 7851 llvm::Function *F = cast<llvm::Function>(GV); 7852 7853 // Step 1: Set ISR calling convention. 7854 F->setCallingConv(llvm::CallingConv::MSP430_INTR); 7855 7856 // Step 2: Add attributes goodness. 7857 F->addFnAttr(llvm::Attribute::NoInline); 7858 F->addFnAttr("interrupt", llvm::utostr(InterruptAttr->getNumber())); 7859 } 7860 } 7861 7862 //===----------------------------------------------------------------------===// 7863 // MIPS ABI Implementation. This works for both little-endian and 7864 // big-endian variants. 
7865 //===----------------------------------------------------------------------===// 7866 7867 namespace { 7868 class MipsABIInfo : public ABIInfo { 7869 bool IsO32; 7870 unsigned MinABIStackAlignInBytes, StackAlignInBytes; 7871 void CoerceToIntArgs(uint64_t TySize, 7872 SmallVectorImpl<llvm::Type *> &ArgList) const; 7873 llvm::Type* HandleAggregates(QualType Ty, uint64_t TySize) const; 7874 llvm::Type* returnAggregateInRegs(QualType RetTy, uint64_t Size) const; 7875 llvm::Type* getPaddingType(uint64_t Align, uint64_t Offset) const; 7876 public: 7877 MipsABIInfo(CodeGenTypes &CGT, bool _IsO32) : 7878 ABIInfo(CGT), IsO32(_IsO32), MinABIStackAlignInBytes(IsO32 ? 4 : 8), 7879 StackAlignInBytes(IsO32 ? 8 : 16) {} 7880 7881 ABIArgInfo classifyReturnType(QualType RetTy) const; 7882 ABIArgInfo classifyArgumentType(QualType RetTy, uint64_t &Offset) const; 7883 void computeInfo(CGFunctionInfo &FI) const override; 7884 Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr, 7885 QualType Ty) const override; 7886 ABIArgInfo extendType(QualType Ty) const; 7887 }; 7888 7889 class MIPSTargetCodeGenInfo : public TargetCodeGenInfo { 7890 unsigned SizeOfUnwindException; 7891 public: 7892 MIPSTargetCodeGenInfo(CodeGenTypes &CGT, bool IsO32) 7893 : TargetCodeGenInfo(std::make_unique<MipsABIInfo>(CGT, IsO32)), 7894 SizeOfUnwindException(IsO32 ? 24 : 32) {} 7895 7896 int getDwarfEHStackPointer(CodeGen::CodeGenModule &CGM) const override { 7897 return 29; 7898 } 7899 7900 void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV, 7901 CodeGen::CodeGenModule &CGM) const override { 7902 const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D); 7903 if (!FD) return; 7904 llvm::Function *Fn = cast<llvm::Function>(GV); 7905 7906 if (FD->hasAttr<MipsLongCallAttr>()) 7907 Fn->addFnAttr("long-call"); 7908 else if (FD->hasAttr<MipsShortCallAttr>()) 7909 Fn->addFnAttr("short-call"); 7910 7911 // Other attributes do not have a meaning for declarations. 
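    // (The mips16/micromips and interrupt attributes below change how the
    // function body is emitted, so they only apply to definitions.)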
7912 if (GV->isDeclaration()) 7913 return; 7914 7915 if (FD->hasAttr<Mips16Attr>()) { 7916 Fn->addFnAttr("mips16"); 7917 } 7918 else if (FD->hasAttr<NoMips16Attr>()) { 7919 Fn->addFnAttr("nomips16"); 7920 } 7921 7922 if (FD->hasAttr<MicroMipsAttr>()) 7923 Fn->addFnAttr("micromips"); 7924 else if (FD->hasAttr<NoMicroMipsAttr>()) 7925 Fn->addFnAttr("nomicromips"); 7926 7927 const MipsInterruptAttr *Attr = FD->getAttr<MipsInterruptAttr>(); 7928 if (!Attr) 7929 return; 7930 7931 const char *Kind; 7932 switch (Attr->getInterrupt()) { 7933 case MipsInterruptAttr::eic: Kind = "eic"; break; 7934 case MipsInterruptAttr::sw0: Kind = "sw0"; break; 7935 case MipsInterruptAttr::sw1: Kind = "sw1"; break; 7936 case MipsInterruptAttr::hw0: Kind = "hw0"; break; 7937 case MipsInterruptAttr::hw1: Kind = "hw1"; break; 7938 case MipsInterruptAttr::hw2: Kind = "hw2"; break; 7939 case MipsInterruptAttr::hw3: Kind = "hw3"; break; 7940 case MipsInterruptAttr::hw4: Kind = "hw4"; break; 7941 case MipsInterruptAttr::hw5: Kind = "hw5"; break; 7942 } 7943 7944 Fn->addFnAttr("interrupt", Kind); 7945 7946 } 7947 7948 bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF, 7949 llvm::Value *Address) const override; 7950 7951 unsigned getSizeOfUnwindException() const override { 7952 return SizeOfUnwindException; 7953 } 7954 }; 7955 } 7956 7957 void MipsABIInfo::CoerceToIntArgs( 7958 uint64_t TySize, SmallVectorImpl<llvm::Type *> &ArgList) const { 7959 llvm::IntegerType *IntTy = 7960 llvm::IntegerType::get(getVMContext(), MinABIStackAlignInBytes * 8); 7961 7962 // Add (TySize / MinABIStackAlignInBytes) args of IntTy. 7963 for (unsigned N = TySize / (MinABIStackAlignInBytes * 8); N; --N) 7964 ArgList.push_back(IntTy); 7965 7966 // If necessary, add one more integer type to ArgList. 7967 unsigned R = TySize % (MinABIStackAlignInBytes * 8); 7968 7969 if (R) 7970 ArgList.push_back(llvm::IntegerType::get(getVMContext(), R)); 7971 } 7972 7973 // In N32/64, an aligned double precision floating point field is passed in 7974 // a register. 7975 llvm::Type* MipsABIInfo::HandleAggregates(QualType Ty, uint64_t TySize) const { 7976 SmallVector<llvm::Type*, 8> ArgList, IntArgList; 7977 7978 if (IsO32) { 7979 CoerceToIntArgs(TySize, ArgList); 7980 return llvm::StructType::get(getVMContext(), ArgList); 7981 } 7982 7983 if (Ty->isComplexType()) 7984 return CGT.ConvertType(Ty); 7985 7986 const RecordType *RT = Ty->getAs<RecordType>(); 7987 7988 // Unions/vectors are passed in integer registers. 7989 if (!RT || !RT->isStructureOrClassType()) { 7990 CoerceToIntArgs(TySize, ArgList); 7991 return llvm::StructType::get(getVMContext(), ArgList); 7992 } 7993 7994 const RecordDecl *RD = RT->getDecl(); 7995 const ASTRecordLayout &Layout = getContext().getASTRecordLayout(RD); 7996 assert(!(TySize % 8) && "Size of structure must be multiple of 8."); 7997 7998 uint64_t LastOffset = 0; 7999 unsigned idx = 0; 8000 llvm::IntegerType *I64 = llvm::IntegerType::get(getVMContext(), 64); 8001 8002 // Iterate over fields in the struct/class and check if there are any aligned 8003 // double fields. 8004 for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end(); 8005 i != e; ++i, ++idx) { 8006 const QualType Ty = i->getType(); 8007 const BuiltinType *BT = Ty->getAs<BuiltinType>(); 8008 8009 if (!BT || BT->getKind() != BuiltinType::Double) 8010 continue; 8011 8012 uint64_t Offset = Layout.getFieldOffset(idx); 8013 if (Offset % 64) // Ignore doubles that are not aligned. 
8014 continue; 8015 8016 // Add ((Offset - LastOffset) / 64) args of type i64. 8017 for (unsigned j = (Offset - LastOffset) / 64; j > 0; --j) 8018 ArgList.push_back(I64); 8019 8020 // Add double type. 8021 ArgList.push_back(llvm::Type::getDoubleTy(getVMContext())); 8022 LastOffset = Offset + 64; 8023 } 8024 8025 CoerceToIntArgs(TySize - LastOffset, IntArgList); 8026 ArgList.append(IntArgList.begin(), IntArgList.end()); 8027 8028 return llvm::StructType::get(getVMContext(), ArgList); 8029 } 8030 8031 llvm::Type *MipsABIInfo::getPaddingType(uint64_t OrigOffset, 8032 uint64_t Offset) const { 8033 if (OrigOffset + MinABIStackAlignInBytes > Offset) 8034 return nullptr; 8035 8036 return llvm::IntegerType::get(getVMContext(), (Offset - OrigOffset) * 8); 8037 } 8038 8039 ABIArgInfo 8040 MipsABIInfo::classifyArgumentType(QualType Ty, uint64_t &Offset) const { 8041 Ty = useFirstFieldIfTransparentUnion(Ty); 8042 8043 uint64_t OrigOffset = Offset; 8044 uint64_t TySize = getContext().getTypeSize(Ty); 8045 uint64_t Align = getContext().getTypeAlign(Ty) / 8; 8046 8047 Align = std::min(std::max(Align, (uint64_t)MinABIStackAlignInBytes), 8048 (uint64_t)StackAlignInBytes); 8049 unsigned CurrOffset = llvm::alignTo(Offset, Align); 8050 Offset = CurrOffset + llvm::alignTo(TySize, Align * 8) / 8; 8051 8052 if (isAggregateTypeForABI(Ty) || Ty->isVectorType()) { 8053 // Ignore empty aggregates. 8054 if (TySize == 0) 8055 return ABIArgInfo::getIgnore(); 8056 8057 if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, getCXXABI())) { 8058 Offset = OrigOffset + MinABIStackAlignInBytes; 8059 return getNaturalAlignIndirect(Ty, RAA == CGCXXABI::RAA_DirectInMemory); 8060 } 8061 8062 // If we have reached here, aggregates are passed directly by coercing to 8063 // another structure type. Padding is inserted if the offset of the 8064 // aggregate is unaligned. 8065 ABIArgInfo ArgInfo = 8066 ABIArgInfo::getDirect(HandleAggregates(Ty, TySize), 0, 8067 getPaddingType(OrigOffset, CurrOffset)); 8068 ArgInfo.setInReg(true); 8069 return ArgInfo; 8070 } 8071 8072 // Treat an enum type as its underlying type. 8073 if (const EnumType *EnumTy = Ty->getAs<EnumType>()) 8074 Ty = EnumTy->getDecl()->getIntegerType(); 8075 8076 // Make sure we pass indirectly things that are too large. 8077 if (const auto *EIT = Ty->getAs<BitIntType>()) 8078 if (EIT->getNumBits() > 128 || 8079 (EIT->getNumBits() > 64 && 8080 !getContext().getTargetInfo().hasInt128Type())) 8081 return getNaturalAlignIndirect(Ty); 8082 8083 // All integral types are promoted to the GPR width. 8084 if (Ty->isIntegralOrEnumerationType()) 8085 return extendType(Ty); 8086 8087 return ABIArgInfo::getDirect( 8088 nullptr, 0, IsO32 ? nullptr : getPaddingType(OrigOffset, CurrOffset)); 8089 } 8090 8091 llvm::Type* 8092 MipsABIInfo::returnAggregateInRegs(QualType RetTy, uint64_t Size) const { 8093 const RecordType *RT = RetTy->getAs<RecordType>(); 8094 SmallVector<llvm::Type*, 8> RTList; 8095 8096 if (RT && RT->isStructureOrClassType()) { 8097 const RecordDecl *RD = RT->getDecl(); 8098 const ASTRecordLayout &Layout = getContext().getASTRecordLayout(RD); 8099 unsigned FieldCnt = Layout.getFieldCount(); 8100 8101 // N32/64 returns struct/classes in floating point registers if the 8102 // following conditions are met: 8103 // 1. The size of the struct/class is no larger than 128-bit. 8104 // 2. The struct/class has one or two fields all of which are floating 8105 // point types. 8106 // 3. The offset of the first field is zero (this follows what gcc does). 
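    //
    // For example (illustrative): struct { double d; } and
    // struct { float f; double d; } satisfy these rules, while
    // struct { float f; int i; } does not and falls through to the
    // integer-register path below.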
8107 // 8108 // Any other composite results are returned in integer registers. 8109 // 8110 if (FieldCnt && (FieldCnt <= 2) && !Layout.getFieldOffset(0)) { 8111 RecordDecl::field_iterator b = RD->field_begin(), e = RD->field_end(); 8112 for (; b != e; ++b) { 8113 const BuiltinType *BT = b->getType()->getAs<BuiltinType>(); 8114 8115 if (!BT || !BT->isFloatingPoint()) 8116 break; 8117 8118 RTList.push_back(CGT.ConvertType(b->getType())); 8119 } 8120 8121 if (b == e) 8122 return llvm::StructType::get(getVMContext(), RTList, 8123 RD->hasAttr<PackedAttr>()); 8124 8125 RTList.clear(); 8126 } 8127 } 8128 8129 CoerceToIntArgs(Size, RTList); 8130 return llvm::StructType::get(getVMContext(), RTList); 8131 } 8132 8133 ABIArgInfo MipsABIInfo::classifyReturnType(QualType RetTy) const { 8134 uint64_t Size = getContext().getTypeSize(RetTy); 8135 8136 if (RetTy->isVoidType()) 8137 return ABIArgInfo::getIgnore(); 8138 8139 // O32 doesn't treat zero-sized structs differently from other structs. 8140 // However, N32/N64 ignores zero sized return values. 8141 if (!IsO32 && Size == 0) 8142 return ABIArgInfo::getIgnore(); 8143 8144 if (isAggregateTypeForABI(RetTy) || RetTy->isVectorType()) { 8145 if (Size <= 128) { 8146 if (RetTy->isAnyComplexType()) 8147 return ABIArgInfo::getDirect(); 8148 8149 // O32 returns integer vectors in registers and N32/N64 returns all small 8150 // aggregates in registers. 8151 if (!IsO32 || 8152 (RetTy->isVectorType() && !RetTy->hasFloatingRepresentation())) { 8153 ABIArgInfo ArgInfo = 8154 ABIArgInfo::getDirect(returnAggregateInRegs(RetTy, Size)); 8155 ArgInfo.setInReg(true); 8156 return ArgInfo; 8157 } 8158 } 8159 8160 return getNaturalAlignIndirect(RetTy); 8161 } 8162 8163 // Treat an enum type as its underlying type. 8164 if (const EnumType *EnumTy = RetTy->getAs<EnumType>()) 8165 RetTy = EnumTy->getDecl()->getIntegerType(); 8166 8167 // Make sure we pass indirectly things that are too large. 8168 if (const auto *EIT = RetTy->getAs<BitIntType>()) 8169 if (EIT->getNumBits() > 128 || 8170 (EIT->getNumBits() > 64 && 8171 !getContext().getTargetInfo().hasInt128Type())) 8172 return getNaturalAlignIndirect(RetTy); 8173 8174 if (isPromotableIntegerTypeForABI(RetTy)) 8175 return ABIArgInfo::getExtend(RetTy); 8176 8177 if ((RetTy->isUnsignedIntegerOrEnumerationType() || 8178 RetTy->isSignedIntegerOrEnumerationType()) && Size == 32 && !IsO32) 8179 return ABIArgInfo::getSignExtend(RetTy); 8180 8181 return ABIArgInfo::getDirect(); 8182 } 8183 8184 void MipsABIInfo::computeInfo(CGFunctionInfo &FI) const { 8185 ABIArgInfo &RetInfo = FI.getReturnInfo(); 8186 if (!getCXXABI().classifyReturnType(FI)) 8187 RetInfo = classifyReturnType(FI.getReturnType()); 8188 8189 // Check if a pointer to an aggregate is passed as a hidden argument. 8190 uint64_t Offset = RetInfo.isIndirect() ? MinABIStackAlignInBytes : 0; 8191 8192 for (auto &I : FI.arguments()) 8193 I.info = classifyArgumentType(I.type, Offset); 8194 } 8195 8196 Address MipsABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr, 8197 QualType OrigTy) const { 8198 QualType Ty = OrigTy; 8199 8200 // Integer arguments are promoted to 32-bit on O32 and 64-bit on N32/N64. 8201 // Pointers are also promoted in the same way but this only matters for N32. 8202 unsigned SlotSizeInBits = IsO32 ? 
32 : 64;
  unsigned PtrWidth = getTarget().getPointerWidth(0);
  bool DidPromote = false;
  if ((Ty->isIntegerType() &&
          getContext().getIntWidth(Ty) < SlotSizeInBits) ||
      (Ty->isPointerType() && PtrWidth < SlotSizeInBits)) {
    DidPromote = true;
    Ty = getContext().getIntTypeForBitwidth(SlotSizeInBits,
                                            Ty->isSignedIntegerType());
  }

  auto TyInfo = getContext().getTypeInfoInChars(Ty);

  // The alignment of things in the argument area is never larger than
  // StackAlignInBytes.
  TyInfo.Align =
    std::min(TyInfo.Align, CharUnits::fromQuantity(StackAlignInBytes));

  // MinABIStackAlignInBytes is the size of argument slots on the stack.
  CharUnits ArgSlotSize = CharUnits::fromQuantity(MinABIStackAlignInBytes);

  Address Addr = emitVoidPtrVAArg(CGF, VAListAddr, Ty, /*indirect*/ false,
                          TyInfo, ArgSlotSize, /*AllowHigherAlign*/ true);

  // If there was a promotion, "unpromote" into a temporary.
  // TODO: can we just use a pointer into a subset of the original slot?
  if (DidPromote) {
    Address Temp = CGF.CreateMemTemp(OrigTy, "vaarg.promotion-temp");
    llvm::Value *Promoted = CGF.Builder.CreateLoad(Addr);

    // Truncate down to the right width.
    llvm::Type *IntTy = (OrigTy->isIntegerType() ? Temp.getElementType()
                                                 : CGF.IntPtrTy);
    llvm::Value *V = CGF.Builder.CreateTrunc(Promoted, IntTy);
    if (OrigTy->isPointerType())
      V = CGF.Builder.CreateIntToPtr(V, Temp.getElementType());

    CGF.Builder.CreateStore(V, Temp);
    Addr = Temp;
  }

  return Addr;
}

ABIArgInfo MipsABIInfo::extendType(QualType Ty) const {
  int TySize = getContext().getTypeSize(Ty);

  // MIPS64 ABI requires unsigned 32 bit integers to be sign extended.
  if (Ty->isUnsignedIntegerOrEnumerationType() && TySize == 32)
    return ABIArgInfo::getSignExtend(Ty);

  return ABIArgInfo::getExtend(Ty);
}

bool
MIPSTargetCodeGenInfo::initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
                                               llvm::Value *Address) const {
  // This information comes from gcc's implementation, which seems to be
  // as canonical as it gets.

  // Everything on MIPS is 4 bytes. Double-precision FP registers
  // are aliased to pairs of single-precision FP registers.
  llvm::Value *Four8 = llvm::ConstantInt::get(CGF.Int8Ty, 4);

  // 0-31 are the general purpose registers, $0 - $31.
  // 32-63 are the floating-point registers, $f0 - $f31.
  // 64 and 65 are the multiply/divide registers, $hi and $lo.
  // 66 is the (notional, I think) register for signal-handler return.
  AssignToArrayRange(CGF.Builder, Address, Four8, 0, 65);

  // 67-74 are the floating-point status registers, $fcc0 - $fcc7.
  // They are one bit wide and ignored here.

  // 80-111 are the coprocessor 0 registers, $c0r0 - $c0r31.
  // (coprocessor 1 is the FP unit)
  // 112-143 are the coprocessor 2 registers, $c2r0 - $c2r31.
  // 144-175 are the coprocessor 3 registers, $c3r0 - $c3r31.
  // 176-181 are the DSP accumulator registers.
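  // All of these are likewise recorded as 4 bytes wide.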
  AssignToArrayRange(CGF.Builder, Address, Four8, 80, 181);
  return false;
}

//===----------------------------------------------------------------------===//
// M68k ABI Implementation
//===----------------------------------------------------------------------===//

namespace {

class M68kTargetCodeGenInfo : public TargetCodeGenInfo {
public:
  M68kTargetCodeGenInfo(CodeGenTypes &CGT)
      : TargetCodeGenInfo(std::make_unique<DefaultABIInfo>(CGT)) {}
  void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
                           CodeGen::CodeGenModule &M) const override;
};

} // namespace

void M68kTargetCodeGenInfo::setTargetAttributes(
    const Decl *D, llvm::GlobalValue *GV, CodeGen::CodeGenModule &M) const {
  if (const auto *FD = dyn_cast_or_null<FunctionDecl>(D)) {
    if (const auto *attr = FD->getAttr<M68kInterruptAttr>()) {
      // Handle 'interrupt' attribute:
      llvm::Function *F = cast<llvm::Function>(GV);

      // Step 1: Set ISR calling convention.
      F->setCallingConv(llvm::CallingConv::M68k_INTR);

      // Step 2: Add attributes goodness.
      F->addFnAttr(llvm::Attribute::NoInline);

      // Step 3: Emit ISR vector alias.
      unsigned Num = attr->getNumber() / 2;
      llvm::GlobalAlias::create(llvm::Function::ExternalLinkage,
                                "__isr_" + Twine(Num), F);
    }
  }
}

//===----------------------------------------------------------------------===//
// AVR ABI Implementation. Documented at
// https://gcc.gnu.org/wiki/avr-gcc#Calling_Convention
// https://gcc.gnu.org/wiki/avr-gcc#Reduced_Tiny
//===----------------------------------------------------------------------===//

namespace {
class AVRABIInfo : public DefaultABIInfo {
private:
  // The total number of registers that can be used to pass parameters: 18 on
  // AVR, or 6 on AVRTiny.
  const unsigned ParamRegs;
  // The total number of registers that can be used to return a value: 8 on
  // AVR, or 4 on AVRTiny.
  const unsigned RetRegs;

public:
  AVRABIInfo(CodeGenTypes &CGT, unsigned NPR, unsigned NRR)
      : DefaultABIInfo(CGT), ParamRegs(NPR), RetRegs(NRR) {}

  ABIArgInfo classifyReturnType(QualType Ty, bool &LargeRet) const {
    if (isAggregateTypeForABI(Ty)) {
      // On AVR, a return struct with size less than or equal to 8 bytes is
      // returned directly via registers R18-R25. On AVRTiny, a return struct
      // with size less than or equal to 4 bytes is returned directly via
      // registers R22-R25.
      if (getContext().getTypeSize(Ty) <= RetRegs * 8)
        return ABIArgInfo::getDirect();
      // A larger return struct is returned via a stack slot, along with a
      // pointer to it as the function's implicit argument.
      LargeRet = true;
      return getNaturalAlignIndirect(Ty);
    }
    // Otherwise we follow the default handling, which is compatible.
    return DefaultABIInfo::classifyReturnType(Ty);
  }

  ABIArgInfo classifyArgumentType(QualType Ty, unsigned &NumRegs) const {
    unsigned TySize = getContext().getTypeSize(Ty);

    // An int8 type argument always costs two registers like an int16.
    if (TySize == 8 && NumRegs >= 2) {
      NumRegs -= 2;
      return ABIArgInfo::getExtend(Ty);
    }

    // If the argument size is an odd number of bytes, round up the size
    // to the next even number.
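    // (TySize is in bits here, so rounding to 16 bits rounds an odd byte
    // count up to the next even one; e.g. a 3-byte struct is costed as
    // 4 bytes, i.e. four 8-bit registers.)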
    TySize = llvm::alignTo(TySize, 16);

    // Any type including an array/struct type can be passed in registers,
    // if there are enough registers left.
    if (TySize <= NumRegs * 8) {
      NumRegs -= TySize / 8;
      return ABIArgInfo::getDirect();
    }

    // An argument is passed either completely in registers or completely in
    // memory. Since there are not enough registers left, the current argument
    // and all remaining unprocessed arguments are passed in memory. However,
    // we still need to return `ABIArgInfo::getDirect()` rather than
    // `ABIInfo::getNaturalAlignIndirect(Ty)`; otherwise an extra stack slot
    // would be allocated, making the stack frame layout incompatible with
    // avr-gcc.
    NumRegs = 0;
    return ABIArgInfo::getDirect();
  }

  void computeInfo(CGFunctionInfo &FI) const override {
    // Decide the return type.
    bool LargeRet = false;
    if (!getCXXABI().classifyReturnType(FI))
      FI.getReturnInfo() = classifyReturnType(FI.getReturnType(), LargeRet);

    // Decide each argument type. The total number of registers that can be
    // used for arguments depends on several factors:
    // 1. Arguments of varargs functions are passed on the stack. This applies
    //    even to the named arguments, so no registers can be used.
    // 2. A total of 18 registers can be used on AVR and 6 on AVRTiny.
    // 3. If the return type is a struct that is too large, two registers (out
    //    of the 18/6) are consumed by the implicit pointer argument.
    unsigned NumRegs = ParamRegs;
    if (FI.isVariadic())
      NumRegs = 0;
    else if (LargeRet)
      NumRegs -= 2;
    for (auto &I : FI.arguments())
      I.info = classifyArgumentType(I.type, NumRegs);
  }
};

class AVRTargetCodeGenInfo : public TargetCodeGenInfo {
public:
  AVRTargetCodeGenInfo(CodeGenTypes &CGT, unsigned NPR, unsigned NRR)
      : TargetCodeGenInfo(std::make_unique<AVRABIInfo>(CGT, NPR, NRR)) {}

  LangAS getGlobalVarAddressSpace(CodeGenModule &CGM,
                                  const VarDecl *D) const override {
    // Check if a global/static variable is defined in address space
    // 1~6 (__flash, __flash1, __flash2, __flash3, __flash4, __flash5)
    // but not constant.
    if (D) {
      LangAS AS = D->getType().getAddressSpace();
      if (isTargetAddressSpace(AS) && 1 <= toTargetAddressSpace(AS) &&
          toTargetAddressSpace(AS) <= 6 && !D->getType().isConstQualified())
        CGM.getDiags().Report(D->getLocation(),
                              diag::err_verify_nonconst_addrspace)
            << "__flash*";
    }
    return TargetCodeGenInfo::getGlobalVarAddressSpace(CGM, D);
  }

  void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
                           CodeGen::CodeGenModule &CGM) const override {
    if (GV->isDeclaration())
      return;
    const auto *FD = dyn_cast_or_null<FunctionDecl>(D);
    if (!FD) return;
    auto *Fn = cast<llvm::Function>(GV);

    if (FD->getAttr<AVRInterruptAttr>())
      Fn->addFnAttr("interrupt");

    if (FD->getAttr<AVRSignalAttr>())
      Fn->addFnAttr("signal");
  }
};
}

//===----------------------------------------------------------------------===//
// TCE ABI Implementation (see http://tce.cs.tut.fi). Uses mostly the defaults.
// Currently subclassed only to implement custom OpenCL C function attribute
// handling.
8455 //===----------------------------------------------------------------------===// 8456 8457 namespace { 8458 8459 class TCETargetCodeGenInfo : public DefaultTargetCodeGenInfo { 8460 public: 8461 TCETargetCodeGenInfo(CodeGenTypes &CGT) 8462 : DefaultTargetCodeGenInfo(CGT) {} 8463 8464 void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV, 8465 CodeGen::CodeGenModule &M) const override; 8466 }; 8467 8468 void TCETargetCodeGenInfo::setTargetAttributes( 8469 const Decl *D, llvm::GlobalValue *GV, CodeGen::CodeGenModule &M) const { 8470 if (GV->isDeclaration()) 8471 return; 8472 const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D); 8473 if (!FD) return; 8474 8475 llvm::Function *F = cast<llvm::Function>(GV); 8476 8477 if (M.getLangOpts().OpenCL) { 8478 if (FD->hasAttr<OpenCLKernelAttr>()) { 8479 // OpenCL C Kernel functions are not subject to inlining 8480 F->addFnAttr(llvm::Attribute::NoInline); 8481 const ReqdWorkGroupSizeAttr *Attr = FD->getAttr<ReqdWorkGroupSizeAttr>(); 8482 if (Attr) { 8483 // Convert the reqd_work_group_size() attributes to metadata. 8484 llvm::LLVMContext &Context = F->getContext(); 8485 llvm::NamedMDNode *OpenCLMetadata = 8486 M.getModule().getOrInsertNamedMetadata( 8487 "opencl.kernel_wg_size_info"); 8488 8489 SmallVector<llvm::Metadata *, 5> Operands; 8490 Operands.push_back(llvm::ConstantAsMetadata::get(F)); 8491 8492 Operands.push_back( 8493 llvm::ConstantAsMetadata::get(llvm::Constant::getIntegerValue( 8494 M.Int32Ty, llvm::APInt(32, Attr->getXDim())))); 8495 Operands.push_back( 8496 llvm::ConstantAsMetadata::get(llvm::Constant::getIntegerValue( 8497 M.Int32Ty, llvm::APInt(32, Attr->getYDim())))); 8498 Operands.push_back( 8499 llvm::ConstantAsMetadata::get(llvm::Constant::getIntegerValue( 8500 M.Int32Ty, llvm::APInt(32, Attr->getZDim())))); 8501 8502 // Add a boolean constant operand for "required" (true) or "hint" 8503 // (false) for implementing the work_group_size_hint attr later. 8504 // Currently always true as the hint is not yet implemented. 
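        // The resulting operand list is roughly (illustrative):
        //   !{ptr @kernel, i32 X, i32 Y, i32 Z, i1 true}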
        Operands.push_back(
            llvm::ConstantAsMetadata::get(llvm::ConstantInt::getTrue(Context)));
        OpenCLMetadata->addOperand(llvm::MDNode::get(Context, Operands));
      }
    }
  }
}

}

//===----------------------------------------------------------------------===//
// Hexagon ABI Implementation
//===----------------------------------------------------------------------===//

namespace {

class HexagonABIInfo : public DefaultABIInfo {
public:
  HexagonABIInfo(CodeGenTypes &CGT) : DefaultABIInfo(CGT) {}

private:
  ABIArgInfo classifyReturnType(QualType RetTy) const;
  ABIArgInfo classifyArgumentType(QualType RetTy) const;
  ABIArgInfo classifyArgumentType(QualType RetTy, unsigned *RegsLeft) const;

  void computeInfo(CGFunctionInfo &FI) const override;

  Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
                    QualType Ty) const override;
  Address EmitVAArgFromMemory(CodeGenFunction &CGF, Address VAListAddr,
                              QualType Ty) const;
  Address EmitVAArgForHexagon(CodeGenFunction &CGF, Address VAListAddr,
                              QualType Ty) const;
  Address EmitVAArgForHexagonLinux(CodeGenFunction &CGF, Address VAListAddr,
                                   QualType Ty) const;
};

class HexagonTargetCodeGenInfo : public TargetCodeGenInfo {
public:
  HexagonTargetCodeGenInfo(CodeGenTypes &CGT)
      : TargetCodeGenInfo(std::make_unique<HexagonABIInfo>(CGT)) {}

  int getDwarfEHStackPointer(CodeGen::CodeGenModule &M) const override {
    return 29;
  }

  void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
                           CodeGen::CodeGenModule &GCM) const override {
    if (GV->isDeclaration())
      return;
    const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D);
    if (!FD)
      return;
  }
};

} // namespace

void HexagonABIInfo::computeInfo(CGFunctionInfo &FI) const {
  unsigned RegsLeft = 6;
  if (!getCXXABI().classifyReturnType(FI))
    FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
  for (auto &I : FI.arguments())
    I.info = classifyArgumentType(I.type, &RegsLeft);
}

static bool HexagonAdjustRegsLeft(uint64_t Size, unsigned *RegsLeft) {
  assert(Size <= 64 && "Not expecting to pass arguments larger than 64 bits"
                       " through registers");

  if (*RegsLeft == 0)
    return false;

  if (Size <= 32) {
    (*RegsLeft)--;
    return true;
  }

  if (2 <= (*RegsLeft & (~1U))) {
    *RegsLeft = (*RegsLeft & (~1U)) - 2;
    return true;
  }

  // The next available register was r5, but the candidate was larger than
  // 32 bits, so it has to go on the stack. However, we still consume r5.
  if (*RegsLeft == 1)
    *RegsLeft = 0;

  return false;
}

ABIArgInfo HexagonABIInfo::classifyArgumentType(QualType Ty,
                                                unsigned *RegsLeft) const {
  if (!isAggregateTypeForABI(Ty)) {
    // Treat an enum type as its underlying type.
    if (const EnumType *EnumTy = Ty->getAs<EnumType>())
      Ty = EnumTy->getDecl()->getIntegerType();

    uint64_t Size = getContext().getTypeSize(Ty);
    if (Size <= 64)
      HexagonAdjustRegsLeft(Size, RegsLeft);

    if (Size > 64 && Ty->isBitIntType())
      return getNaturalAlignIndirect(Ty, /*ByVal=*/true);

    return isPromotableIntegerTypeForABI(Ty) ?
ABIArgInfo::getExtend(Ty) 8611 : ABIArgInfo::getDirect(); 8612 } 8613 8614 if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, getCXXABI())) 8615 return getNaturalAlignIndirect(Ty, RAA == CGCXXABI::RAA_DirectInMemory); 8616 8617 // Ignore empty records. 8618 if (isEmptyRecord(getContext(), Ty, true)) 8619 return ABIArgInfo::getIgnore(); 8620 8621 uint64_t Size = getContext().getTypeSize(Ty); 8622 unsigned Align = getContext().getTypeAlign(Ty); 8623 8624 if (Size > 64) 8625 return getNaturalAlignIndirect(Ty, /*ByVal=*/true); 8626 8627 if (HexagonAdjustRegsLeft(Size, RegsLeft)) 8628 Align = Size <= 32 ? 32 : 64; 8629 if (Size <= Align) { 8630 // Pass in the smallest viable integer type. 8631 if (!llvm::isPowerOf2_64(Size)) 8632 Size = llvm::NextPowerOf2(Size); 8633 return ABIArgInfo::getDirect(llvm::Type::getIntNTy(getVMContext(), Size)); 8634 } 8635 return DefaultABIInfo::classifyArgumentType(Ty); 8636 } 8637 8638 ABIArgInfo HexagonABIInfo::classifyReturnType(QualType RetTy) const { 8639 if (RetTy->isVoidType()) 8640 return ABIArgInfo::getIgnore(); 8641 8642 const TargetInfo &T = CGT.getTarget(); 8643 uint64_t Size = getContext().getTypeSize(RetTy); 8644 8645 if (RetTy->getAs<VectorType>()) { 8646 // HVX vectors are returned in vector registers or register pairs. 8647 if (T.hasFeature("hvx")) { 8648 assert(T.hasFeature("hvx-length64b") || T.hasFeature("hvx-length128b")); 8649 uint64_t VecSize = T.hasFeature("hvx-length64b") ? 64*8 : 128*8; 8650 if (Size == VecSize || Size == 2*VecSize) 8651 return ABIArgInfo::getDirectInReg(); 8652 } 8653 // Large vector types should be returned via memory. 8654 if (Size > 64) 8655 return getNaturalAlignIndirect(RetTy); 8656 } 8657 8658 if (!isAggregateTypeForABI(RetTy)) { 8659 // Treat an enum type as its underlying type. 8660 if (const EnumType *EnumTy = RetTy->getAs<EnumType>()) 8661 RetTy = EnumTy->getDecl()->getIntegerType(); 8662 8663 if (Size > 64 && RetTy->isBitIntType()) 8664 return getNaturalAlignIndirect(RetTy, /*ByVal=*/false); 8665 8666 return isPromotableIntegerTypeForABI(RetTy) ? ABIArgInfo::getExtend(RetTy) 8667 : ABIArgInfo::getDirect(); 8668 } 8669 8670 if (isEmptyRecord(getContext(), RetTy, true)) 8671 return ABIArgInfo::getIgnore(); 8672 8673 // Aggregates <= 8 bytes are returned in registers, other aggregates 8674 // are returned indirectly. 8675 if (Size <= 64) { 8676 // Return in the smallest viable integer type. 8677 if (!llvm::isPowerOf2_64(Size)) 8678 Size = llvm::NextPowerOf2(Size); 8679 return ABIArgInfo::getDirect(llvm::Type::getIntNTy(getVMContext(), Size)); 8680 } 8681 return getNaturalAlignIndirect(RetTy, /*ByVal=*/true); 8682 } 8683 8684 Address HexagonABIInfo::EmitVAArgFromMemory(CodeGenFunction &CGF, 8685 Address VAListAddr, 8686 QualType Ty) const { 8687 // Load the overflow area pointer. 8688 Address __overflow_area_pointer_p = 8689 CGF.Builder.CreateStructGEP(VAListAddr, 2, "__overflow_area_pointer_p"); 8690 llvm::Value *__overflow_area_pointer = CGF.Builder.CreateLoad( 8691 __overflow_area_pointer_p, "__overflow_area_pointer"); 8692 8693 uint64_t Align = CGF.getContext().getTypeAlign(Ty) / 8; 8694 if (Align > 4) { 8695 // Alignment should be a power of 2. 8696 assert((Align & (Align - 1)) == 0 && "Alignment is not power of 2!"); 8697 8698 // overflow_arg_area = (overflow_arg_area + align - 1) & -align; 8699 llvm::Value *Offset = llvm::ConstantInt::get(CGF.Int64Ty, Align - 1); 8700 8701 // Add offset to the current pointer to access the argument. 
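    // For example (illustrative): with Align == 8, a pointer value of 20
    // becomes (20 + 7) & -8 == 24.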
    __overflow_area_pointer =
        CGF.Builder.CreateGEP(CGF.Int8Ty, __overflow_area_pointer, Offset);
    llvm::Value *AsInt =
        CGF.Builder.CreatePtrToInt(__overflow_area_pointer, CGF.Int32Ty);

    // Create a mask to be "AND"ed with (overflow_arg_area + align - 1).
    llvm::Value *Mask = llvm::ConstantInt::get(CGF.Int32Ty, -(int)Align);
    __overflow_area_pointer = CGF.Builder.CreateIntToPtr(
        CGF.Builder.CreateAnd(AsInt, Mask), __overflow_area_pointer->getType(),
        "__overflow_area_pointer.align");
  }

  // Get the type of the argument from memory and bitcast the overflow area
  // pointer to the argument type.
  llvm::Type *PTy = CGF.ConvertTypeForMem(Ty);
  Address AddrTyped = CGF.Builder.CreateElementBitCast(
      Address(__overflow_area_pointer, CGF.Int8Ty,
              CharUnits::fromQuantity(Align)),
      PTy);

  // Round up to the minimum stack alignment for varargs, which is 4 bytes.
  uint64_t Offset = llvm::alignTo(CGF.getContext().getTypeSize(Ty) / 8, 4);

  __overflow_area_pointer = CGF.Builder.CreateGEP(
      CGF.Int8Ty, __overflow_area_pointer,
      llvm::ConstantInt::get(CGF.Int32Ty, Offset),
      "__overflow_area_pointer.next");
  CGF.Builder.CreateStore(__overflow_area_pointer, __overflow_area_pointer_p);

  return AddrTyped;
}

Address HexagonABIInfo::EmitVAArgForHexagon(CodeGenFunction &CGF,
                                            Address VAListAddr,
                                            QualType Ty) const {
  // FIXME: Need to handle alignment.
  llvm::Type *BP = CGF.Int8PtrTy;
  CGBuilderTy &Builder = CGF.Builder;
  Address VAListAddrAsBPP = Builder.CreateElementBitCast(VAListAddr, BP, "ap");
  llvm::Value *Addr = Builder.CreateLoad(VAListAddrAsBPP, "ap.cur");
  // Handle address alignment for type alignment > 32 bits.
  uint64_t TyAlign = CGF.getContext().getTypeAlign(Ty) / 8;
  if (TyAlign > 4) {
    assert((TyAlign & (TyAlign - 1)) == 0 && "Alignment is not power of 2!");
    llvm::Value *AddrAsInt = Builder.CreatePtrToInt(Addr, CGF.Int32Ty);
    AddrAsInt = Builder.CreateAdd(AddrAsInt, Builder.getInt32(TyAlign - 1));
    AddrAsInt = Builder.CreateAnd(AddrAsInt, Builder.getInt32(~(TyAlign - 1)));
    Addr = Builder.CreateIntToPtr(AddrAsInt, BP);
  }
  Address AddrTyped = Builder.CreateElementBitCast(
      Address(Addr, CGF.Int8Ty, CharUnits::fromQuantity(TyAlign)),
      CGF.ConvertType(Ty));

  uint64_t Offset = llvm::alignTo(CGF.getContext().getTypeSize(Ty) / 8, 4);
  llvm::Value *NextAddr = Builder.CreateGEP(
      CGF.Int8Ty, Addr, llvm::ConstantInt::get(CGF.Int32Ty, Offset), "ap.next");
  Builder.CreateStore(NextAddr, VAListAddrAsBPP);

  return AddrTyped;
}

Address HexagonABIInfo::EmitVAArgForHexagonLinux(CodeGenFunction &CGF,
                                                 Address VAListAddr,
                                                 QualType Ty) const {
  int ArgSize = CGF.getContext().getTypeSize(Ty) / 8;

  if (ArgSize > 8)
    return EmitVAArgFromMemory(CGF, VAListAddr, Ty);

  // Here we have to check whether the argument is in the register save area
  // or in the overflow area. If the saved register area pointer plus the
  // argument size (rounded up to the alignment) exceeds the saved register
  // area end pointer, the argument is in the overflow area.
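  // For example (illustrative): with an 8-byte argument and the current saved
  // register area pointer only 4 bytes below the end pointer, the new pointer
  // overshoots the end pointer and the argument is fetched from the overflow
  // area instead.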
  unsigned RegsLeft = 6;
  Ty = CGF.getContext().getCanonicalType(Ty);
  (void)classifyArgumentType(Ty, &RegsLeft);

  llvm::BasicBlock *MaybeRegBlock = CGF.createBasicBlock("vaarg.maybe_reg");
  llvm::BasicBlock *InRegBlock = CGF.createBasicBlock("vaarg.in_reg");
  llvm::BasicBlock *OnStackBlock = CGF.createBasicBlock("vaarg.on_stack");
  llvm::BasicBlock *ContBlock = CGF.createBasicBlock("vaarg.end");

  // Get the rounded size of the argument. GCC does not allow varargs of
  // size < 4 bytes; we follow the same logic here.
  ArgSize = (CGF.getContext().getTypeSize(Ty) <= 32) ? 4 : 8;
  int ArgAlign = (CGF.getContext().getTypeSize(Ty) <= 32) ? 4 : 8;

  // The argument may be in the saved register area.
  CGF.EmitBlock(MaybeRegBlock);

  // Load the current saved register area pointer.
  Address __current_saved_reg_area_pointer_p = CGF.Builder.CreateStructGEP(
      VAListAddr, 0, "__current_saved_reg_area_pointer_p");
  llvm::Value *__current_saved_reg_area_pointer = CGF.Builder.CreateLoad(
      __current_saved_reg_area_pointer_p, "__current_saved_reg_area_pointer");

  // Load the saved register area end pointer.
  Address __saved_reg_area_end_pointer_p = CGF.Builder.CreateStructGEP(
      VAListAddr, 1, "__saved_reg_area_end_pointer_p");
  llvm::Value *__saved_reg_area_end_pointer = CGF.Builder.CreateLoad(
      __saved_reg_area_end_pointer_p, "__saved_reg_area_end_pointer");

  // If the size of the argument is > 4 bytes, check whether the stack
  // location is aligned to 8 bytes.
  if (ArgAlign > 4) {
    llvm::Value *__current_saved_reg_area_pointer_int =
        CGF.Builder.CreatePtrToInt(__current_saved_reg_area_pointer,
                                   CGF.Int32Ty);

    __current_saved_reg_area_pointer_int = CGF.Builder.CreateAdd(
        __current_saved_reg_area_pointer_int,
        llvm::ConstantInt::get(CGF.Int32Ty, (ArgAlign - 1)),
        "align_current_saved_reg_area_pointer");

    __current_saved_reg_area_pointer_int =
        CGF.Builder.CreateAnd(__current_saved_reg_area_pointer_int,
                              llvm::ConstantInt::get(CGF.Int32Ty, -ArgAlign),
                              "align_current_saved_reg_area_pointer");

    __current_saved_reg_area_pointer =
        CGF.Builder.CreateIntToPtr(__current_saved_reg_area_pointer_int,
                                   __current_saved_reg_area_pointer->getType(),
                                   "align_current_saved_reg_area_pointer");
  }

  llvm::Value *__new_saved_reg_area_pointer =
      CGF.Builder.CreateGEP(CGF.Int8Ty, __current_saved_reg_area_pointer,
                            llvm::ConstantInt::get(CGF.Int32Ty, ArgSize),
                            "__new_saved_reg_area_pointer");

  llvm::Value *UsingStack = CGF.Builder.CreateICmpSGT(
      __new_saved_reg_area_pointer, __saved_reg_area_end_pointer);

  CGF.Builder.CreateCondBr(UsingStack, OnStackBlock, InRegBlock);

  // The argument is in the saved register area: implement the block where
  // the argument is loaded from the register save area.
  CGF.EmitBlock(InRegBlock);

  llvm::Type *PTy = CGF.ConvertType(Ty);
  llvm::Value *__saved_reg_area_p = CGF.Builder.CreateBitCast(
      __current_saved_reg_area_pointer, llvm::PointerType::getUnqual(PTy));

  CGF.Builder.CreateStore(__new_saved_reg_area_pointer,
                          __current_saved_reg_area_pointer_p);

  CGF.EmitBranch(ContBlock);

  // The argument is in the overflow area: implement the block where the
  // argument is loaded from the stack.
  CGF.EmitBlock(OnStackBlock);

  // Load the overflow area pointer.
  Address __overflow_area_pointer_p =
      CGF.Builder.CreateStructGEP(VAListAddr, 2, "__overflow_area_pointer_p");
  llvm::Value *__overflow_area_pointer = CGF.Builder.CreateLoad(
      __overflow_area_pointer_p, "__overflow_area_pointer");

  // Align the overflow area pointer to the alignment of the argument.
  if (ArgAlign > 4) {
    llvm::Value *__overflow_area_pointer_int =
        CGF.Builder.CreatePtrToInt(__overflow_area_pointer, CGF.Int32Ty);

    __overflow_area_pointer_int =
        CGF.Builder.CreateAdd(__overflow_area_pointer_int,
                              llvm::ConstantInt::get(CGF.Int32Ty, ArgAlign - 1),
                              "align_overflow_area_pointer");

    __overflow_area_pointer_int =
        CGF.Builder.CreateAnd(__overflow_area_pointer_int,
                              llvm::ConstantInt::get(CGF.Int32Ty, -ArgAlign),
                              "align_overflow_area_pointer");

    __overflow_area_pointer = CGF.Builder.CreateIntToPtr(
        __overflow_area_pointer_int, __overflow_area_pointer->getType(),
        "align_overflow_area_pointer");
  }

  // Compute the pointer to the next argument in the overflow area and store
  // it back to the overflow area pointer.
  llvm::Value *__new_overflow_area_pointer = CGF.Builder.CreateGEP(
      CGF.Int8Ty, __overflow_area_pointer,
      llvm::ConstantInt::get(CGF.Int32Ty, ArgSize),
      "__overflow_area_pointer.next");

  CGF.Builder.CreateStore(__new_overflow_area_pointer,
                          __overflow_area_pointer_p);

  CGF.Builder.CreateStore(__new_overflow_area_pointer,
                          __current_saved_reg_area_pointer_p);

  // Bitcast the overflow area pointer to the type of the argument.
  llvm::Type *OverflowPTy = CGF.ConvertTypeForMem(Ty);
  llvm::Value *__overflow_area_p = CGF.Builder.CreateBitCast(
      __overflow_area_pointer, llvm::PointerType::getUnqual(OverflowPTy));

  CGF.EmitBranch(ContBlock);

  // Get the correct pointer to load the variable argument: implement the
  // continuation block.
  CGF.EmitBlock(ContBlock);

  llvm::Type *MemTy = CGF.ConvertTypeForMem(Ty);
  llvm::Type *MemPTy = llvm::PointerType::getUnqual(MemTy);
  llvm::PHINode *ArgAddr = CGF.Builder.CreatePHI(MemPTy, 2, "vaarg.addr");
  ArgAddr->addIncoming(__saved_reg_area_p, InRegBlock);
  ArgAddr->addIncoming(__overflow_area_p, OnStackBlock);

  return Address(ArgAddr, MemTy, CharUnits::fromQuantity(ArgAlign));
}

Address HexagonABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
                                  QualType Ty) const {
  if (getTarget().getTriple().isMusl())
    return EmitVAArgForHexagonLinux(CGF, VAListAddr, Ty);

  return EmitVAArgForHexagon(CGF, VAListAddr, Ty);
}

//===----------------------------------------------------------------------===//
// Lanai ABI Implementation
//===----------------------------------------------------------------------===//

namespace {
class LanaiABIInfo : public DefaultABIInfo {
public:
  LanaiABIInfo(CodeGen::CodeGenTypes &CGT) : DefaultABIInfo(CGT) {}

  bool shouldUseInReg(QualType Ty, CCState &State) const;

  void computeInfo(CGFunctionInfo &FI) const override {
    CCState State(FI);
    // Lanai uses 4 registers to pass arguments unless the function has the
    // regparm attribute set.
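    // For example (illustrative): __attribute__((regparm(2))) would leave
    // only 2 registers for arguments instead of the default 4.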
8940 if (FI.getHasRegParm()) { 8941 State.FreeRegs = FI.getRegParm(); 8942 } else { 8943 State.FreeRegs = 4; 8944 } 8945 8946 if (!getCXXABI().classifyReturnType(FI)) 8947 FI.getReturnInfo() = classifyReturnType(FI.getReturnType()); 8948 for (auto &I : FI.arguments()) 8949 I.info = classifyArgumentType(I.type, State); 8950 } 8951 8952 ABIArgInfo getIndirectResult(QualType Ty, bool ByVal, CCState &State) const; 8953 ABIArgInfo classifyArgumentType(QualType RetTy, CCState &State) const; 8954 }; 8955 } // end anonymous namespace 8956 8957 bool LanaiABIInfo::shouldUseInReg(QualType Ty, CCState &State) const { 8958 unsigned Size = getContext().getTypeSize(Ty); 8959 unsigned SizeInRegs = llvm::alignTo(Size, 32U) / 32U; 8960 8961 if (SizeInRegs == 0) 8962 return false; 8963 8964 if (SizeInRegs > State.FreeRegs) { 8965 State.FreeRegs = 0; 8966 return false; 8967 } 8968 8969 State.FreeRegs -= SizeInRegs; 8970 8971 return true; 8972 } 8973 8974 ABIArgInfo LanaiABIInfo::getIndirectResult(QualType Ty, bool ByVal, 8975 CCState &State) const { 8976 if (!ByVal) { 8977 if (State.FreeRegs) { 8978 --State.FreeRegs; // Non-byval indirects just use one pointer. 8979 return getNaturalAlignIndirectInReg(Ty); 8980 } 8981 return getNaturalAlignIndirect(Ty, false); 8982 } 8983 8984 // Compute the byval alignment. 8985 const unsigned MinABIStackAlignInBytes = 4; 8986 unsigned TypeAlign = getContext().getTypeAlign(Ty) / 8; 8987 return ABIArgInfo::getIndirect(CharUnits::fromQuantity(4), /*ByVal=*/true, 8988 /*Realign=*/TypeAlign > 8989 MinABIStackAlignInBytes); 8990 } 8991 8992 ABIArgInfo LanaiABIInfo::classifyArgumentType(QualType Ty, 8993 CCState &State) const { 8994 // Check with the C++ ABI first. 8995 const RecordType *RT = Ty->getAs<RecordType>(); 8996 if (RT) { 8997 CGCXXABI::RecordArgABI RAA = getRecordArgABI(RT, getCXXABI()); 8998 if (RAA == CGCXXABI::RAA_Indirect) { 8999 return getIndirectResult(Ty, /*ByVal=*/false, State); 9000 } else if (RAA == CGCXXABI::RAA_DirectInMemory) { 9001 return getNaturalAlignIndirect(Ty, /*ByVal=*/true); 9002 } 9003 } 9004 9005 if (isAggregateTypeForABI(Ty)) { 9006 // Structures with flexible arrays are always indirect. 9007 if (RT && RT->getDecl()->hasFlexibleArrayMember()) 9008 return getIndirectResult(Ty, /*ByVal=*/true, State); 9009 9010 // Ignore empty structs/unions. 9011 if (isEmptyRecord(getContext(), Ty, true)) 9012 return ABIArgInfo::getIgnore(); 9013 9014 llvm::LLVMContext &LLVMContext = getVMContext(); 9015 unsigned SizeInRegs = (getContext().getTypeSize(Ty) + 31) / 32; 9016 if (SizeInRegs <= State.FreeRegs) { 9017 llvm::IntegerType *Int32 = llvm::Type::getInt32Ty(LLVMContext); 9018 SmallVector<llvm::Type *, 3> Elements(SizeInRegs, Int32); 9019 llvm::Type *Result = llvm::StructType::get(LLVMContext, Elements); 9020 State.FreeRegs -= SizeInRegs; 9021 return ABIArgInfo::getDirectInReg(Result); 9022 } else { 9023 State.FreeRegs = 0; 9024 } 9025 return getIndirectResult(Ty, true, State); 9026 } 9027 9028 // Treat an enum type as its underlying type. 9029 if (const auto *EnumTy = Ty->getAs<EnumType>()) 9030 Ty = EnumTy->getDecl()->getIntegerType(); 9031 9032 bool InReg = shouldUseInReg(Ty, State); 9033 9034 // Don't pass >64 bit integers in registers. 
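  // (E.g. an _BitInt(128) argument is passed indirectly, byval.)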
9035 if (const auto *EIT = Ty->getAs<BitIntType>()) 9036 if (EIT->getNumBits() > 64) 9037 return getIndirectResult(Ty, /*ByVal=*/true, State); 9038 9039 if (isPromotableIntegerTypeForABI(Ty)) { 9040 if (InReg) 9041 return ABIArgInfo::getDirectInReg(); 9042 return ABIArgInfo::getExtend(Ty); 9043 } 9044 if (InReg) 9045 return ABIArgInfo::getDirectInReg(); 9046 return ABIArgInfo::getDirect(); 9047 } 9048 9049 namespace { 9050 class LanaiTargetCodeGenInfo : public TargetCodeGenInfo { 9051 public: 9052 LanaiTargetCodeGenInfo(CodeGen::CodeGenTypes &CGT) 9053 : TargetCodeGenInfo(std::make_unique<LanaiABIInfo>(CGT)) {} 9054 }; 9055 } 9056 9057 //===----------------------------------------------------------------------===// 9058 // AMDGPU ABI Implementation 9059 //===----------------------------------------------------------------------===// 9060 9061 namespace { 9062 9063 class AMDGPUABIInfo final : public DefaultABIInfo { 9064 private: 9065 static const unsigned MaxNumRegsForArgsRet = 16; 9066 9067 unsigned numRegsForType(QualType Ty) const; 9068 9069 bool isHomogeneousAggregateBaseType(QualType Ty) const override; 9070 bool isHomogeneousAggregateSmallEnough(const Type *Base, 9071 uint64_t Members) const override; 9072 9073 // Coerce HIP scalar pointer arguments from generic pointers to global ones. 9074 llvm::Type *coerceKernelArgumentType(llvm::Type *Ty, unsigned FromAS, 9075 unsigned ToAS) const { 9076 // Single value types. 9077 auto *PtrTy = llvm::dyn_cast<llvm::PointerType>(Ty); 9078 if (PtrTy && PtrTy->getAddressSpace() == FromAS) 9079 return llvm::PointerType::getWithSamePointeeType(PtrTy, ToAS); 9080 return Ty; 9081 } 9082 9083 public: 9084 explicit AMDGPUABIInfo(CodeGen::CodeGenTypes &CGT) : 9085 DefaultABIInfo(CGT) {} 9086 9087 ABIArgInfo classifyReturnType(QualType RetTy) const; 9088 ABIArgInfo classifyKernelArgumentType(QualType Ty) const; 9089 ABIArgInfo classifyArgumentType(QualType Ty, unsigned &NumRegsLeft) const; 9090 9091 void computeInfo(CGFunctionInfo &FI) const override; 9092 Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr, 9093 QualType Ty) const override; 9094 }; 9095 9096 bool AMDGPUABIInfo::isHomogeneousAggregateBaseType(QualType Ty) const { 9097 return true; 9098 } 9099 9100 bool AMDGPUABIInfo::isHomogeneousAggregateSmallEnough( 9101 const Type *Base, uint64_t Members) const { 9102 uint32_t NumRegs = (getContext().getTypeSize(Base) + 31) / 32; 9103 9104 // Homogeneous Aggregates may occupy at most 16 registers. 9105 return Members * NumRegs <= MaxNumRegsForArgsRet; 9106 } 9107 9108 /// Estimate number of registers the type will use when passed in registers. 9109 unsigned AMDGPUABIInfo::numRegsForType(QualType Ty) const { 9110 unsigned NumRegs = 0; 9111 9112 if (const VectorType *VT = Ty->getAs<VectorType>()) { 9113 // Compute from the number of elements. The reported size is based on the 9114 // in-memory size, which includes the padding 4th element for 3-vectors. 9115 QualType EltTy = VT->getElementType(); 9116 unsigned EltSize = getContext().getTypeSize(EltTy); 9117 9118 // 16-bit element vectors should be passed as packed. 
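    // E.g. a <3 x half> vector needs (3 + 1) / 2 == 2 registers.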
9119 if (EltSize == 16) 9120 return (VT->getNumElements() + 1) / 2; 9121 9122 unsigned EltNumRegs = (EltSize + 31) / 32; 9123 return EltNumRegs * VT->getNumElements(); 9124 } 9125 9126 if (const RecordType *RT = Ty->getAs<RecordType>()) { 9127 const RecordDecl *RD = RT->getDecl(); 9128 assert(!RD->hasFlexibleArrayMember()); 9129 9130 for (const FieldDecl *Field : RD->fields()) { 9131 QualType FieldTy = Field->getType(); 9132 NumRegs += numRegsForType(FieldTy); 9133 } 9134 9135 return NumRegs; 9136 } 9137 9138 return (getContext().getTypeSize(Ty) + 31) / 32; 9139 } 9140 9141 void AMDGPUABIInfo::computeInfo(CGFunctionInfo &FI) const { 9142 llvm::CallingConv::ID CC = FI.getCallingConvention(); 9143 9144 if (!getCXXABI().classifyReturnType(FI)) 9145 FI.getReturnInfo() = classifyReturnType(FI.getReturnType()); 9146 9147 unsigned NumRegsLeft = MaxNumRegsForArgsRet; 9148 for (auto &Arg : FI.arguments()) { 9149 if (CC == llvm::CallingConv::AMDGPU_KERNEL) { 9150 Arg.info = classifyKernelArgumentType(Arg.type); 9151 } else { 9152 Arg.info = classifyArgumentType(Arg.type, NumRegsLeft); 9153 } 9154 } 9155 } 9156 9157 Address AMDGPUABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr, 9158 QualType Ty) const { 9159 llvm_unreachable("AMDGPU does not support varargs"); 9160 } 9161 9162 ABIArgInfo AMDGPUABIInfo::classifyReturnType(QualType RetTy) const { 9163 if (isAggregateTypeForABI(RetTy)) { 9164 // Records with non-trivial destructors/copy-constructors should not be 9165 // returned by value. 9166 if (!getRecordArgABI(RetTy, getCXXABI())) { 9167 // Ignore empty structs/unions. 9168 if (isEmptyRecord(getContext(), RetTy, true)) 9169 return ABIArgInfo::getIgnore(); 9170 9171 // Lower single-element structs to just return a regular value. 9172 if (const Type *SeltTy = isSingleElementStruct(RetTy, getContext())) 9173 return ABIArgInfo::getDirect(CGT.ConvertType(QualType(SeltTy, 0))); 9174 9175 if (const RecordType *RT = RetTy->getAs<RecordType>()) { 9176 const RecordDecl *RD = RT->getDecl(); 9177 if (RD->hasFlexibleArrayMember()) 9178 return DefaultABIInfo::classifyReturnType(RetTy); 9179 } 9180 9181 // Pack aggregates <= 4 bytes into single VGPR or pair. 9182 uint64_t Size = getContext().getTypeSize(RetTy); 9183 if (Size <= 16) 9184 return ABIArgInfo::getDirect(llvm::Type::getInt16Ty(getVMContext())); 9185 9186 if (Size <= 32) 9187 return ABIArgInfo::getDirect(llvm::Type::getInt32Ty(getVMContext())); 9188 9189 if (Size <= 64) { 9190 llvm::Type *I32Ty = llvm::Type::getInt32Ty(getVMContext()); 9191 return ABIArgInfo::getDirect(llvm::ArrayType::get(I32Ty, 2)); 9192 } 9193 9194 if (numRegsForType(RetTy) <= MaxNumRegsForArgsRet) 9195 return ABIArgInfo::getDirect(); 9196 } 9197 } 9198 9199 // Otherwise just do the default thing. 9200 return DefaultABIInfo::classifyReturnType(RetTy); 9201 } 9202 9203 /// For kernels all parameters are really passed in a special buffer. It doesn't 9204 /// make sense to pass anything byval, so everything must be direct. 9205 ABIArgInfo AMDGPUABIInfo::classifyKernelArgumentType(QualType Ty) const { 9206 Ty = useFirstFieldIfTransparentUnion(Ty); 9207 9208 // TODO: Can we omit empty structs? 
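  // For HIP (illustrative): a scalar kernel parameter of type int* is in the
  // generic address space in the AST and is coerced below to a
  // global-address-space pointer.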
9209
9210 if (const Type *SeltTy = isSingleElementStruct(Ty, getContext()))
9211 Ty = QualType(SeltTy, 0);
9212
9213 llvm::Type *OrigLTy = CGT.ConvertType(Ty);
9214 llvm::Type *LTy = OrigLTy;
9215 if (getContext().getLangOpts().HIP) {
9216 LTy = coerceKernelArgumentType(
9217 OrigLTy, /*FromAS=*/getContext().getTargetAddressSpace(LangAS::Default),
9218 /*ToAS=*/getContext().getTargetAddressSpace(LangAS::cuda_device));
9219 }
9220
9221 // FIXME: Should also use this for OpenCL, but it requires addressing the
9222 // problem of kernels being called.
9223 //
9224 // FIXME: This doesn't apply the optimization of coercing pointers in structs
9225 // to global address space when using byref. This would require implementing a
9226 // new kind of coercion of the in-memory type used for indirect arguments.
9227 if (!getContext().getLangOpts().OpenCL && LTy == OrigLTy &&
9228 isAggregateTypeForABI(Ty)) {
9229 return ABIArgInfo::getIndirectAliased(
9230 getContext().getTypeAlignInChars(Ty),
9231 getContext().getTargetAddressSpace(LangAS::opencl_constant),
9232 false /*Realign*/, nullptr /*Padding*/);
9233 }
9234
9235 // If we set CanBeFlattened to true, CodeGen will expand the struct to its
9236 // individual elements, which confuses the Clover OpenCL backend; therefore we
9237 // have to set it to false here. Other args of getDirect() are just defaults.
9238 return ABIArgInfo::getDirect(LTy, 0, nullptr, false);
9239 }
9240
9241 ABIArgInfo AMDGPUABIInfo::classifyArgumentType(QualType Ty,
9242 unsigned &NumRegsLeft) const {
9243 assert(NumRegsLeft <= MaxNumRegsForArgsRet && "register estimate underflow");
9244
9245 Ty = useFirstFieldIfTransparentUnion(Ty);
9246
9247 if (isAggregateTypeForABI(Ty)) {
9248 // Records with non-trivial destructors/copy-constructors should not be
9249 // passed by value.
9250 if (auto RAA = getRecordArgABI(Ty, getCXXABI()))
9251 return getNaturalAlignIndirect(Ty, RAA == CGCXXABI::RAA_DirectInMemory);
9252
9253 // Ignore empty structs/unions.
9254 if (isEmptyRecord(getContext(), Ty, true))
9255 return ABIArgInfo::getIgnore();
9256
9257 // Lower single-element structs to just pass a regular value. TODO: We
9258 // could do reasonable-size multiple-element structs too, using getExpand(),
9259 // though watch out for things like bitfields.
9260 if (const Type *SeltTy = isSingleElementStruct(Ty, getContext()))
9261 return ABIArgInfo::getDirect(CGT.ConvertType(QualType(SeltTy, 0)));
9262
9263 if (const RecordType *RT = Ty->getAs<RecordType>()) {
9264 const RecordDecl *RD = RT->getDecl();
9265 if (RD->hasFlexibleArrayMember())
9266 return DefaultABIInfo::classifyArgumentType(Ty);
9267 }
9268
9269 // Pack aggregates <= 8 bytes into a single VGPR or pair.
9270 uint64_t Size = getContext().getTypeSize(Ty);
9271 if (Size <= 64) {
9272 unsigned NumRegs = (Size + 31) / 32;
9273 NumRegsLeft -= std::min(NumRegsLeft, NumRegs);
9274
9275 if (Size <= 16)
9276 return ABIArgInfo::getDirect(llvm::Type::getInt16Ty(getVMContext()));
9277
9278 if (Size <= 32)
9279 return ABIArgInfo::getDirect(llvm::Type::getInt32Ty(getVMContext()));
9280
9281 // XXX: Should this be i64 instead, and should the limit increase?
9282 llvm::Type *I32Ty = llvm::Type::getInt32Ty(getVMContext());
9283 return ABIArgInfo::getDirect(llvm::ArrayType::get(I32Ty, 2));
9284 }
9285
9286 if (NumRegsLeft > 0) {
9287 unsigned NumRegs = numRegsForType(Ty);
9288 if (NumRegsLeft >= NumRegs) {
9289 NumRegsLeft -= NumRegs;
9290 return ABIArgInfo::getDirect();
9291 }
9292 }
9293 }
9294
9295 // Otherwise just do the default thing.
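// The fallback still debits NumRegsLeft (for non-indirect results) so that
// subsequent arguments see an accurate running register count.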
9296 ABIArgInfo ArgInfo = DefaultABIInfo::classifyArgumentType(Ty); 9297 if (!ArgInfo.isIndirect()) { 9298 unsigned NumRegs = numRegsForType(Ty); 9299 NumRegsLeft -= std::min(NumRegs, NumRegsLeft); 9300 } 9301 9302 return ArgInfo; 9303 } 9304 9305 class AMDGPUTargetCodeGenInfo : public TargetCodeGenInfo { 9306 public: 9307 AMDGPUTargetCodeGenInfo(CodeGenTypes &CGT) 9308 : TargetCodeGenInfo(std::make_unique<AMDGPUABIInfo>(CGT)) {} 9309 9310 void setFunctionDeclAttributes(const FunctionDecl *FD, llvm::Function *F, 9311 CodeGenModule &CGM) const; 9312 9313 void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV, 9314 CodeGen::CodeGenModule &M) const override; 9315 unsigned getOpenCLKernelCallingConv() const override; 9316 9317 llvm::Constant *getNullPointer(const CodeGen::CodeGenModule &CGM, 9318 llvm::PointerType *T, QualType QT) const override; 9319 9320 LangAS getASTAllocaAddressSpace() const override { 9321 return getLangASFromTargetAS( 9322 getABIInfo().getDataLayout().getAllocaAddrSpace()); 9323 } 9324 LangAS getGlobalVarAddressSpace(CodeGenModule &CGM, 9325 const VarDecl *D) const override; 9326 llvm::SyncScope::ID getLLVMSyncScopeID(const LangOptions &LangOpts, 9327 SyncScope Scope, 9328 llvm::AtomicOrdering Ordering, 9329 llvm::LLVMContext &Ctx) const override; 9330 llvm::Function * 9331 createEnqueuedBlockKernel(CodeGenFunction &CGF, 9332 llvm::Function *BlockInvokeFunc, 9333 llvm::Type *BlockTy) const override; 9334 bool shouldEmitStaticExternCAliases() const override; 9335 void setCUDAKernelCallingConvention(const FunctionType *&FT) const override; 9336 }; 9337 } 9338 9339 static bool requiresAMDGPUProtectedVisibility(const Decl *D, 9340 llvm::GlobalValue *GV) { 9341 if (GV->getVisibility() != llvm::GlobalValue::HiddenVisibility) 9342 return false; 9343 9344 return D->hasAttr<OpenCLKernelAttr>() || 9345 (isa<FunctionDecl>(D) && D->hasAttr<CUDAGlobalAttr>()) || 9346 (isa<VarDecl>(D) && 9347 (D->hasAttr<CUDADeviceAttr>() || D->hasAttr<CUDAConstantAttr>() || 9348 cast<VarDecl>(D)->getType()->isCUDADeviceBuiltinSurfaceType() || 9349 cast<VarDecl>(D)->getType()->isCUDADeviceBuiltinTextureType())); 9350 } 9351 9352 void AMDGPUTargetCodeGenInfo::setFunctionDeclAttributes( 9353 const FunctionDecl *FD, llvm::Function *F, CodeGenModule &M) const { 9354 const auto *ReqdWGS = 9355 M.getLangOpts().OpenCL ? FD->getAttr<ReqdWorkGroupSizeAttr>() : nullptr; 9356 const bool IsOpenCLKernel = 9357 M.getLangOpts().OpenCL && FD->hasAttr<OpenCLKernelAttr>(); 9358 const bool IsHIPKernel = M.getLangOpts().HIP && FD->hasAttr<CUDAGlobalAttr>(); 9359 9360 const auto *FlatWGS = FD->getAttr<AMDGPUFlatWorkGroupSizeAttr>(); 9361 if (ReqdWGS || FlatWGS) { 9362 unsigned Min = 0; 9363 unsigned Max = 0; 9364 if (FlatWGS) { 9365 Min = FlatWGS->getMin() 9366 ->EvaluateKnownConstInt(M.getContext()) 9367 .getExtValue(); 9368 Max = FlatWGS->getMax() 9369 ->EvaluateKnownConstInt(M.getContext()) 9370 .getExtValue(); 9371 } 9372 if (ReqdWGS && Min == 0 && Max == 0) 9373 Min = Max = ReqdWGS->getXDim() * ReqdWGS->getYDim() * ReqdWGS->getZDim(); 9374 9375 if (Min != 0) { 9376 assert(Min <= Max && "Min must be less than or equal Max"); 9377 9378 std::string AttrVal = llvm::utostr(Min) + "," + llvm::utostr(Max); 9379 F->addFnAttr("amdgpu-flat-work-group-size", AttrVal); 9380 } else 9381 assert(Max == 0 && "Max must be zero"); 9382 } else if (IsOpenCLKernel || IsHIPKernel) { 9383 // By default, restrict the maximum size to a value specified by 9384 // --gpu-max-threads-per-block=n or its default value for HIP. 
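// E.g. for an OpenCL kernel this yields
// "amdgpu-flat-work-group-size"="1,256" unless the attributes handled above
// set an explicit range (illustrative).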
9385 const unsigned OpenCLDefaultMaxWorkGroupSize = 256; 9386 const unsigned DefaultMaxWorkGroupSize = 9387 IsOpenCLKernel ? OpenCLDefaultMaxWorkGroupSize 9388 : M.getLangOpts().GPUMaxThreadsPerBlock; 9389 std::string AttrVal = 9390 std::string("1,") + llvm::utostr(DefaultMaxWorkGroupSize); 9391 F->addFnAttr("amdgpu-flat-work-group-size", AttrVal); 9392 } 9393 9394 if (const auto *Attr = FD->getAttr<AMDGPUWavesPerEUAttr>()) { 9395 unsigned Min = 9396 Attr->getMin()->EvaluateKnownConstInt(M.getContext()).getExtValue(); 9397 unsigned Max = Attr->getMax() ? Attr->getMax() 9398 ->EvaluateKnownConstInt(M.getContext()) 9399 .getExtValue() 9400 : 0; 9401 9402 if (Min != 0) { 9403 assert((Max == 0 || Min <= Max) && "Min must be less than or equal Max"); 9404 9405 std::string AttrVal = llvm::utostr(Min); 9406 if (Max != 0) 9407 AttrVal = AttrVal + "," + llvm::utostr(Max); 9408 F->addFnAttr("amdgpu-waves-per-eu", AttrVal); 9409 } else 9410 assert(Max == 0 && "Max must be zero"); 9411 } 9412 9413 if (const auto *Attr = FD->getAttr<AMDGPUNumSGPRAttr>()) { 9414 unsigned NumSGPR = Attr->getNumSGPR(); 9415 9416 if (NumSGPR != 0) 9417 F->addFnAttr("amdgpu-num-sgpr", llvm::utostr(NumSGPR)); 9418 } 9419 9420 if (const auto *Attr = FD->getAttr<AMDGPUNumVGPRAttr>()) { 9421 uint32_t NumVGPR = Attr->getNumVGPR(); 9422 9423 if (NumVGPR != 0) 9424 F->addFnAttr("amdgpu-num-vgpr", llvm::utostr(NumVGPR)); 9425 } 9426 } 9427 9428 void AMDGPUTargetCodeGenInfo::setTargetAttributes( 9429 const Decl *D, llvm::GlobalValue *GV, CodeGen::CodeGenModule &M) const { 9430 if (requiresAMDGPUProtectedVisibility(D, GV)) { 9431 GV->setVisibility(llvm::GlobalValue::ProtectedVisibility); 9432 GV->setDSOLocal(true); 9433 } 9434 9435 if (GV->isDeclaration()) 9436 return; 9437 9438 llvm::Function *F = dyn_cast<llvm::Function>(GV); 9439 if (!F) 9440 return; 9441 9442 const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D); 9443 if (FD) 9444 setFunctionDeclAttributes(FD, F, M); 9445 9446 const bool IsHIPKernel = 9447 M.getLangOpts().HIP && FD && FD->hasAttr<CUDAGlobalAttr>(); 9448 9449 if (IsHIPKernel) 9450 F->addFnAttr("uniform-work-group-size", "true"); 9451 9452 if (M.getContext().getTargetInfo().allowAMDGPUUnsafeFPAtomics()) 9453 F->addFnAttr("amdgpu-unsafe-fp-atomics", "true"); 9454 9455 if (!getABIInfo().getCodeGenOpts().EmitIEEENaNCompliantInsts) 9456 F->addFnAttr("amdgpu-ieee", "false"); 9457 } 9458 9459 unsigned AMDGPUTargetCodeGenInfo::getOpenCLKernelCallingConv() const { 9460 return llvm::CallingConv::AMDGPU_KERNEL; 9461 } 9462 9463 // Currently LLVM assumes null pointers always have value 0, 9464 // which results in incorrectly transformed IR. Therefore, instead of 9465 // emitting null pointers in private and local address spaces, a null 9466 // pointer in generic address space is emitted which is casted to a 9467 // pointer in local or private address space. 
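// For example, on amdgcn the private address space (5) has a nonzero null
// value, so a null private pointer is emitted as an addrspacecast of the
// generic null pointer to addrspace(5) (a sketch of the logic below).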
9468 llvm::Constant *AMDGPUTargetCodeGenInfo::getNullPointer( 9469 const CodeGen::CodeGenModule &CGM, llvm::PointerType *PT, 9470 QualType QT) const { 9471 if (CGM.getContext().getTargetNullPointerValue(QT) == 0) 9472 return llvm::ConstantPointerNull::get(PT); 9473 9474 auto &Ctx = CGM.getContext(); 9475 auto NPT = llvm::PointerType::getWithSamePointeeType( 9476 PT, Ctx.getTargetAddressSpace(LangAS::opencl_generic)); 9477 return llvm::ConstantExpr::getAddrSpaceCast( 9478 llvm::ConstantPointerNull::get(NPT), PT); 9479 } 9480 9481 LangAS 9482 AMDGPUTargetCodeGenInfo::getGlobalVarAddressSpace(CodeGenModule &CGM, 9483 const VarDecl *D) const { 9484 assert(!CGM.getLangOpts().OpenCL && 9485 !(CGM.getLangOpts().CUDA && CGM.getLangOpts().CUDAIsDevice) && 9486 "Address space agnostic languages only"); 9487 LangAS DefaultGlobalAS = getLangASFromTargetAS( 9488 CGM.getContext().getTargetAddressSpace(LangAS::opencl_global)); 9489 if (!D) 9490 return DefaultGlobalAS; 9491 9492 LangAS AddrSpace = D->getType().getAddressSpace(); 9493 assert(AddrSpace == LangAS::Default || isTargetAddressSpace(AddrSpace)); 9494 if (AddrSpace != LangAS::Default) 9495 return AddrSpace; 9496 9497 // Only promote to address space 4 if VarDecl has constant initialization. 9498 if (CGM.isTypeConstant(D->getType(), false) && 9499 D->hasConstantInitialization()) { 9500 if (auto ConstAS = CGM.getTarget().getConstantAddressSpace()) 9501 return *ConstAS; 9502 } 9503 return DefaultGlobalAS; 9504 } 9505 9506 llvm::SyncScope::ID 9507 AMDGPUTargetCodeGenInfo::getLLVMSyncScopeID(const LangOptions &LangOpts, 9508 SyncScope Scope, 9509 llvm::AtomicOrdering Ordering, 9510 llvm::LLVMContext &Ctx) const { 9511 std::string Name; 9512 switch (Scope) { 9513 case SyncScope::HIPSingleThread: 9514 Name = "singlethread"; 9515 break; 9516 case SyncScope::HIPWavefront: 9517 case SyncScope::OpenCLSubGroup: 9518 Name = "wavefront"; 9519 break; 9520 case SyncScope::HIPWorkgroup: 9521 case SyncScope::OpenCLWorkGroup: 9522 Name = "workgroup"; 9523 break; 9524 case SyncScope::HIPAgent: 9525 case SyncScope::OpenCLDevice: 9526 Name = "agent"; 9527 break; 9528 case SyncScope::HIPSystem: 9529 case SyncScope::OpenCLAllSVMDevices: 9530 Name = ""; 9531 break; 9532 } 9533 9534 if (Ordering != llvm::AtomicOrdering::SequentiallyConsistent) { 9535 if (!Name.empty()) 9536 Name = Twine(Twine(Name) + Twine("-")).str(); 9537 9538 Name = Twine(Twine(Name) + Twine("one-as")).str(); 9539 } 9540 9541 return Ctx.getOrInsertSyncScopeID(Name); 9542 } 9543 9544 bool AMDGPUTargetCodeGenInfo::shouldEmitStaticExternCAliases() const { 9545 return false; 9546 } 9547 9548 void AMDGPUTargetCodeGenInfo::setCUDAKernelCallingConvention( 9549 const FunctionType *&FT) const { 9550 FT = getABIInfo().getContext().adjustFunctionType( 9551 FT, FT->getExtInfo().withCallingConv(CC_OpenCLKernel)); 9552 } 9553 9554 //===----------------------------------------------------------------------===// 9555 // SPARC v8 ABI Implementation. 9556 // Based on the SPARC Compliance Definition version 2.4.1. 9557 // 9558 // Ensures that complex values are passed in registers. 
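// (Without this, the default classification would return e.g. a _Complex
// double indirectly via an sret pointer; the override below keeps complex
// results in registers instead.)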
9559 // 9560 namespace { 9561 class SparcV8ABIInfo : public DefaultABIInfo { 9562 public: 9563 SparcV8ABIInfo(CodeGenTypes &CGT) : DefaultABIInfo(CGT) {} 9564 9565 private: 9566 ABIArgInfo classifyReturnType(QualType RetTy) const; 9567 void computeInfo(CGFunctionInfo &FI) const override; 9568 }; 9569 } // end anonymous namespace 9570 9571 9572 ABIArgInfo 9573 SparcV8ABIInfo::classifyReturnType(QualType Ty) const { 9574 if (Ty->isAnyComplexType()) { 9575 return ABIArgInfo::getDirect(); 9576 } 9577 else { 9578 return DefaultABIInfo::classifyReturnType(Ty); 9579 } 9580 } 9581 9582 void SparcV8ABIInfo::computeInfo(CGFunctionInfo &FI) const { 9583 9584 FI.getReturnInfo() = classifyReturnType(FI.getReturnType()); 9585 for (auto &Arg : FI.arguments()) 9586 Arg.info = classifyArgumentType(Arg.type); 9587 } 9588 9589 namespace { 9590 class SparcV8TargetCodeGenInfo : public TargetCodeGenInfo { 9591 public: 9592 SparcV8TargetCodeGenInfo(CodeGenTypes &CGT) 9593 : TargetCodeGenInfo(std::make_unique<SparcV8ABIInfo>(CGT)) {} 9594 9595 llvm::Value *decodeReturnAddress(CodeGen::CodeGenFunction &CGF, 9596 llvm::Value *Address) const override { 9597 int Offset; 9598 if (isAggregateTypeForABI(CGF.CurFnInfo->getReturnType())) 9599 Offset = 12; 9600 else 9601 Offset = 8; 9602 return CGF.Builder.CreateGEP(CGF.Int8Ty, Address, 9603 llvm::ConstantInt::get(CGF.Int32Ty, Offset)); 9604 } 9605 9606 llvm::Value *encodeReturnAddress(CodeGen::CodeGenFunction &CGF, 9607 llvm::Value *Address) const override { 9608 int Offset; 9609 if (isAggregateTypeForABI(CGF.CurFnInfo->getReturnType())) 9610 Offset = -12; 9611 else 9612 Offset = -8; 9613 return CGF.Builder.CreateGEP(CGF.Int8Ty, Address, 9614 llvm::ConstantInt::get(CGF.Int32Ty, Offset)); 9615 } 9616 }; 9617 } // end anonymous namespace 9618 9619 //===----------------------------------------------------------------------===// 9620 // SPARC v9 ABI Implementation. 9621 // Based on the SPARC Compliance Definition version 2.4.1. 9622 // 9623 // Function arguments a mapped to a nominal "parameter array" and promoted to 9624 // registers depending on their type. Each argument occupies 8 or 16 bytes in 9625 // the array, structs larger than 16 bytes are passed indirectly. 9626 // 9627 // One case requires special care: 9628 // 9629 // struct mixed { 9630 // int i; 9631 // float f; 9632 // }; 9633 // 9634 // When a struct mixed is passed by value, it only occupies 8 bytes in the 9635 // parameter array, but the int is passed in an integer register, and the float 9636 // is passed in a floating point register. This is represented as two arguments 9637 // with the LLVM IR inreg attribute: 9638 // 9639 // declare void f(i32 inreg %i, float inreg %f) 9640 // 9641 // The code generator will only allocate 4 bytes from the parameter array for 9642 // the inreg arguments. All other arguments are allocated a multiple of 8 9643 // bytes. 9644 // 9645 namespace { 9646 class SparcV9ABIInfo : public ABIInfo { 9647 public: 9648 SparcV9ABIInfo(CodeGenTypes &CGT) : ABIInfo(CGT) {} 9649 9650 private: 9651 ABIArgInfo classifyType(QualType RetTy, unsigned SizeLimit) const; 9652 void computeInfo(CGFunctionInfo &FI) const override; 9653 Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr, 9654 QualType Ty) const override; 9655 9656 // Coercion type builder for structs passed in registers. The coercion type 9657 // serves two purposes: 9658 // 9659 // 1. Pad structs to a multiple of 64 bits, so they are passed 'left-aligned' 9660 // in registers. 9661 // 2. 
Expose aligned floating point elements as first-level elements, so the 9662 // code generator knows to pass them in floating point registers. 9663 // 9664 // We also compute the InReg flag which indicates that the struct contains 9665 // aligned 32-bit floats. 9666 // 9667 struct CoerceBuilder { 9668 llvm::LLVMContext &Context; 9669 const llvm::DataLayout &DL; 9670 SmallVector<llvm::Type*, 8> Elems; 9671 uint64_t Size; 9672 bool InReg; 9673 9674 CoerceBuilder(llvm::LLVMContext &c, const llvm::DataLayout &dl) 9675 : Context(c), DL(dl), Size(0), InReg(false) {} 9676 9677 // Pad Elems with integers until Size is ToSize. 9678 void pad(uint64_t ToSize) { 9679 assert(ToSize >= Size && "Cannot remove elements"); 9680 if (ToSize == Size) 9681 return; 9682 9683 // Finish the current 64-bit word. 9684 uint64_t Aligned = llvm::alignTo(Size, 64); 9685 if (Aligned > Size && Aligned <= ToSize) { 9686 Elems.push_back(llvm::IntegerType::get(Context, Aligned - Size)); 9687 Size = Aligned; 9688 } 9689 9690 // Add whole 64-bit words. 9691 while (Size + 64 <= ToSize) { 9692 Elems.push_back(llvm::Type::getInt64Ty(Context)); 9693 Size += 64; 9694 } 9695 9696 // Final in-word padding. 9697 if (Size < ToSize) { 9698 Elems.push_back(llvm::IntegerType::get(Context, ToSize - Size)); 9699 Size = ToSize; 9700 } 9701 } 9702 9703 // Add a floating point element at Offset. 9704 void addFloat(uint64_t Offset, llvm::Type *Ty, unsigned Bits) { 9705 // Unaligned floats are treated as integers. 9706 if (Offset % Bits) 9707 return; 9708 // The InReg flag is only required if there are any floats < 64 bits. 9709 if (Bits < 64) 9710 InReg = true; 9711 pad(Offset); 9712 Elems.push_back(Ty); 9713 Size = Offset + Bits; 9714 } 9715 9716 // Add a struct type to the coercion type, starting at Offset (in bits). 9717 void addStruct(uint64_t Offset, llvm::StructType *StrTy) { 9718 const llvm::StructLayout *Layout = DL.getStructLayout(StrTy); 9719 for (unsigned i = 0, e = StrTy->getNumElements(); i != e; ++i) { 9720 llvm::Type *ElemTy = StrTy->getElementType(i); 9721 uint64_t ElemOffset = Offset + Layout->getElementOffsetInBits(i); 9722 switch (ElemTy->getTypeID()) { 9723 case llvm::Type::StructTyID: 9724 addStruct(ElemOffset, cast<llvm::StructType>(ElemTy)); 9725 break; 9726 case llvm::Type::FloatTyID: 9727 addFloat(ElemOffset, ElemTy, 32); 9728 break; 9729 case llvm::Type::DoubleTyID: 9730 addFloat(ElemOffset, ElemTy, 64); 9731 break; 9732 case llvm::Type::FP128TyID: 9733 addFloat(ElemOffset, ElemTy, 128); 9734 break; 9735 case llvm::Type::PointerTyID: 9736 if (ElemOffset % 64 == 0) { 9737 pad(ElemOffset); 9738 Elems.push_back(ElemTy); 9739 Size += 64; 9740 } 9741 break; 9742 default: 9743 break; 9744 } 9745 } 9746 } 9747 9748 // Check if Ty is a usable substitute for the coercion type. 9749 bool isUsableType(llvm::StructType *Ty) const { 9750 return llvm::makeArrayRef(Elems) == Ty->elements(); 9751 } 9752 9753 // Get the coercion type as a literal struct type. 9754 llvm::Type *getType() const { 9755 if (Elems.size() == 1) 9756 return Elems.front(); 9757 else 9758 return llvm::StructType::get(Context, Elems); 9759 } 9760 }; 9761 }; 9762 } // end anonymous namespace 9763 9764 ABIArgInfo 9765 SparcV9ABIInfo::classifyType(QualType Ty, unsigned SizeLimit) const { 9766 if (Ty->isVoidType()) 9767 return ABIArgInfo::getIgnore(); 9768 9769 uint64_t Size = getContext().getTypeSize(Ty); 9770 9771 // Anything too big to fit in registers is passed with an explicit indirect 9772 // pointer / sret pointer. 
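// (Callers use SizeLimit == 16*8 bits for arguments and 32*8 bits for
// return values; see computeInfo and EmitVAArg below.)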
9773 if (Size > SizeLimit) 9774 return getNaturalAlignIndirect(Ty, /*ByVal=*/false); 9775 9776 // Treat an enum type as its underlying type. 9777 if (const EnumType *EnumTy = Ty->getAs<EnumType>()) 9778 Ty = EnumTy->getDecl()->getIntegerType(); 9779 9780 // Integer types smaller than a register are extended. 9781 if (Size < 64 && Ty->isIntegerType()) 9782 return ABIArgInfo::getExtend(Ty); 9783 9784 if (const auto *EIT = Ty->getAs<BitIntType>()) 9785 if (EIT->getNumBits() < 64) 9786 return ABIArgInfo::getExtend(Ty); 9787 9788 // Other non-aggregates go in registers. 9789 if (!isAggregateTypeForABI(Ty)) 9790 return ABIArgInfo::getDirect(); 9791 9792 // If a C++ object has either a non-trivial copy constructor or a non-trivial 9793 // destructor, it is passed with an explicit indirect pointer / sret pointer. 9794 if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, getCXXABI())) 9795 return getNaturalAlignIndirect(Ty, RAA == CGCXXABI::RAA_DirectInMemory); 9796 9797 // This is a small aggregate type that should be passed in registers. 9798 // Build a coercion type from the LLVM struct type. 9799 llvm::StructType *StrTy = dyn_cast<llvm::StructType>(CGT.ConvertType(Ty)); 9800 if (!StrTy) 9801 return ABIArgInfo::getDirect(); 9802 9803 CoerceBuilder CB(getVMContext(), getDataLayout()); 9804 CB.addStruct(0, StrTy); 9805 CB.pad(llvm::alignTo(CB.DL.getTypeSizeInBits(StrTy), 64)); 9806 9807 // Try to use the original type for coercion. 9808 llvm::Type *CoerceTy = CB.isUsableType(StrTy) ? StrTy : CB.getType(); 9809 9810 if (CB.InReg) 9811 return ABIArgInfo::getDirectInReg(CoerceTy); 9812 else 9813 return ABIArgInfo::getDirect(CoerceTy); 9814 } 9815 9816 Address SparcV9ABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr, 9817 QualType Ty) const { 9818 ABIArgInfo AI = classifyType(Ty, 16 * 8); 9819 llvm::Type *ArgTy = CGT.ConvertType(Ty); 9820 if (AI.canHaveCoerceToType() && !AI.getCoerceToType()) 9821 AI.setCoerceToType(ArgTy); 9822 9823 CharUnits SlotSize = CharUnits::fromQuantity(8); 9824 9825 CGBuilderTy &Builder = CGF.Builder; 9826 Address Addr = Address(Builder.CreateLoad(VAListAddr, "ap.cur"), 9827 getVAListElementType(CGF), SlotSize); 9828 llvm::Type *ArgPtrTy = llvm::PointerType::getUnqual(ArgTy); 9829 9830 auto TypeInfo = getContext().getTypeInfoInChars(Ty); 9831 9832 Address ArgAddr = Address::invalid(); 9833 CharUnits Stride; 9834 switch (AI.getKind()) { 9835 case ABIArgInfo::Expand: 9836 case ABIArgInfo::CoerceAndExpand: 9837 case ABIArgInfo::InAlloca: 9838 llvm_unreachable("Unsupported ABI kind for va_arg"); 9839 9840 case ABIArgInfo::Extend: { 9841 Stride = SlotSize; 9842 CharUnits Offset = SlotSize - TypeInfo.Width; 9843 ArgAddr = Builder.CreateConstInBoundsByteGEP(Addr, Offset, "extend"); 9844 break; 9845 } 9846 9847 case ABIArgInfo::Direct: { 9848 auto AllocSize = getDataLayout().getTypeAllocSize(AI.getCoerceToType()); 9849 Stride = CharUnits::fromQuantity(AllocSize).alignTo(SlotSize); 9850 ArgAddr = Addr; 9851 break; 9852 } 9853 9854 case ABIArgInfo::Indirect: 9855 case ABIArgInfo::IndirectAliased: 9856 Stride = SlotSize; 9857 ArgAddr = Builder.CreateElementBitCast(Addr, ArgPtrTy, "indirect"); 9858 ArgAddr = Address(Builder.CreateLoad(ArgAddr, "indirect.arg"), ArgTy, 9859 TypeInfo.Align); 9860 break; 9861 9862 case ABIArgInfo::Ignore: 9863 return Address(llvm::UndefValue::get(ArgPtrTy), ArgTy, TypeInfo.Align); 9864 } 9865 9866 // Update VAList. 
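// ap.cur advances by Stride: one 8-byte slot for Extend and Indirect
// arguments, or the slot-aligned allocation size of the coerced type for
// Direct arguments.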
9867 Address NextPtr = Builder.CreateConstInBoundsByteGEP(Addr, Stride, "ap.next"); 9868 Builder.CreateStore(NextPtr.getPointer(), VAListAddr); 9869 9870 return Builder.CreateElementBitCast(ArgAddr, ArgTy, "arg.addr"); 9871 } 9872 9873 void SparcV9ABIInfo::computeInfo(CGFunctionInfo &FI) const { 9874 FI.getReturnInfo() = classifyType(FI.getReturnType(), 32 * 8); 9875 for (auto &I : FI.arguments()) 9876 I.info = classifyType(I.type, 16 * 8); 9877 } 9878 9879 namespace { 9880 class SparcV9TargetCodeGenInfo : public TargetCodeGenInfo { 9881 public: 9882 SparcV9TargetCodeGenInfo(CodeGenTypes &CGT) 9883 : TargetCodeGenInfo(std::make_unique<SparcV9ABIInfo>(CGT)) {} 9884 9885 int getDwarfEHStackPointer(CodeGen::CodeGenModule &M) const override { 9886 return 14; 9887 } 9888 9889 bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF, 9890 llvm::Value *Address) const override; 9891 9892 llvm::Value *decodeReturnAddress(CodeGen::CodeGenFunction &CGF, 9893 llvm::Value *Address) const override { 9894 return CGF.Builder.CreateGEP(CGF.Int8Ty, Address, 9895 llvm::ConstantInt::get(CGF.Int32Ty, 8)); 9896 } 9897 9898 llvm::Value *encodeReturnAddress(CodeGen::CodeGenFunction &CGF, 9899 llvm::Value *Address) const override { 9900 return CGF.Builder.CreateGEP(CGF.Int8Ty, Address, 9901 llvm::ConstantInt::get(CGF.Int32Ty, -8)); 9902 } 9903 }; 9904 } // end anonymous namespace 9905 9906 bool 9907 SparcV9TargetCodeGenInfo::initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF, 9908 llvm::Value *Address) const { 9909 // This is calculated from the LLVM and GCC tables and verified 9910 // against gcc output. AFAIK all ABIs use the same encoding. 9911 9912 CodeGen::CGBuilderTy &Builder = CGF.Builder; 9913 9914 llvm::IntegerType *i8 = CGF.Int8Ty; 9915 llvm::Value *Four8 = llvm::ConstantInt::get(i8, 4); 9916 llvm::Value *Eight8 = llvm::ConstantInt::get(i8, 8); 9917 9918 // 0-31: the 8-byte general-purpose registers 9919 AssignToArrayRange(Builder, Address, Eight8, 0, 31); 9920 9921 // 32-63: f0-31, the 4-byte floating-point registers 9922 AssignToArrayRange(Builder, Address, Four8, 32, 63); 9923 9924 // Y = 64 9925 // PSR = 65 9926 // WIM = 66 9927 // TBR = 67 9928 // PC = 68 9929 // NPC = 69 9930 // FSR = 70 9931 // CSR = 71 9932 AssignToArrayRange(Builder, Address, Eight8, 64, 71); 9933 9934 // 72-87: d0-15, the 8-byte floating-point registers 9935 AssignToArrayRange(Builder, Address, Eight8, 72, 87); 9936 9937 return false; 9938 } 9939 9940 // ARC ABI implementation. 9941 namespace { 9942 9943 class ARCABIInfo : public DefaultABIInfo { 9944 public: 9945 using DefaultABIInfo::DefaultABIInfo; 9946 9947 private: 9948 Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr, 9949 QualType Ty) const override; 9950 9951 void updateState(const ABIArgInfo &Info, QualType Ty, CCState &State) const { 9952 if (!State.FreeRegs) 9953 return; 9954 if (Info.isIndirect() && Info.getInReg()) 9955 State.FreeRegs--; 9956 else if (Info.isDirect() && Info.getInReg()) { 9957 unsigned sz = (getContext().getTypeSize(Ty) + 31) / 32; 9958 if (sz < State.FreeRegs) 9959 State.FreeRegs -= sz; 9960 else 9961 State.FreeRegs = 0; 9962 } 9963 } 9964 9965 void computeInfo(CGFunctionInfo &FI) const override { 9966 CCState State(FI); 9967 // ARC uses 8 registers to pass arguments. 
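// (On ARC these are r0-r7; the register names are illustrative here, only
// the count below matters.)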
9968 State.FreeRegs = 8; 9969 9970 if (!getCXXABI().classifyReturnType(FI)) 9971 FI.getReturnInfo() = classifyReturnType(FI.getReturnType()); 9972 updateState(FI.getReturnInfo(), FI.getReturnType(), State); 9973 for (auto &I : FI.arguments()) { 9974 I.info = classifyArgumentType(I.type, State.FreeRegs); 9975 updateState(I.info, I.type, State); 9976 } 9977 } 9978 9979 ABIArgInfo getIndirectByRef(QualType Ty, bool HasFreeRegs) const; 9980 ABIArgInfo getIndirectByValue(QualType Ty) const; 9981 ABIArgInfo classifyArgumentType(QualType Ty, uint8_t FreeRegs) const; 9982 ABIArgInfo classifyReturnType(QualType RetTy) const; 9983 }; 9984 9985 class ARCTargetCodeGenInfo : public TargetCodeGenInfo { 9986 public: 9987 ARCTargetCodeGenInfo(CodeGenTypes &CGT) 9988 : TargetCodeGenInfo(std::make_unique<ARCABIInfo>(CGT)) {} 9989 }; 9990 9991 9992 ABIArgInfo ARCABIInfo::getIndirectByRef(QualType Ty, bool HasFreeRegs) const { 9993 return HasFreeRegs ? getNaturalAlignIndirectInReg(Ty) : 9994 getNaturalAlignIndirect(Ty, false); 9995 } 9996 9997 ABIArgInfo ARCABIInfo::getIndirectByValue(QualType Ty) const { 9998 // Compute the byval alignment. 9999 const unsigned MinABIStackAlignInBytes = 4; 10000 unsigned TypeAlign = getContext().getTypeAlign(Ty) / 8; 10001 return ABIArgInfo::getIndirect(CharUnits::fromQuantity(4), /*ByVal=*/true, 10002 TypeAlign > MinABIStackAlignInBytes); 10003 } 10004 10005 Address ARCABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr, 10006 QualType Ty) const { 10007 return emitVoidPtrVAArg(CGF, VAListAddr, Ty, /*indirect*/ false, 10008 getContext().getTypeInfoInChars(Ty), 10009 CharUnits::fromQuantity(4), true); 10010 } 10011 10012 ABIArgInfo ARCABIInfo::classifyArgumentType(QualType Ty, 10013 uint8_t FreeRegs) const { 10014 // Handle the generic C++ ABI. 10015 const RecordType *RT = Ty->getAs<RecordType>(); 10016 if (RT) { 10017 CGCXXABI::RecordArgABI RAA = getRecordArgABI(RT, getCXXABI()); 10018 if (RAA == CGCXXABI::RAA_Indirect) 10019 return getIndirectByRef(Ty, FreeRegs > 0); 10020 10021 if (RAA == CGCXXABI::RAA_DirectInMemory) 10022 return getIndirectByValue(Ty); 10023 } 10024 10025 // Treat an enum type as its underlying type. 10026 if (const EnumType *EnumTy = Ty->getAs<EnumType>()) 10027 Ty = EnumTy->getDecl()->getIntegerType(); 10028 10029 auto SizeInRegs = llvm::alignTo(getContext().getTypeSize(Ty), 32) / 32; 10030 10031 if (isAggregateTypeForABI(Ty)) { 10032 // Structures with flexible arrays are always indirect. 10033 if (RT && RT->getDecl()->hasFlexibleArrayMember()) 10034 return getIndirectByValue(Ty); 10035 10036 // Ignore empty structs/unions. 10037 if (isEmptyRecord(getContext(), Ty, true)) 10038 return ABIArgInfo::getIgnore(); 10039 10040 llvm::LLVMContext &LLVMContext = getVMContext(); 10041 10042 llvm::IntegerType *Int32 = llvm::Type::getInt32Ty(LLVMContext); 10043 SmallVector<llvm::Type *, 3> Elements(SizeInRegs, Int32); 10044 llvm::Type *Result = llvm::StructType::get(LLVMContext, Elements); 10045 10046 return FreeRegs >= SizeInRegs ? 10047 ABIArgInfo::getDirectInReg(Result) : 10048 ABIArgInfo::getDirect(Result, 0, nullptr, false); 10049 } 10050 10051 if (const auto *EIT = Ty->getAs<BitIntType>()) 10052 if (EIT->getNumBits() > 64) 10053 return getIndirectByValue(Ty); 10054 10055 return isPromotableIntegerTypeForABI(Ty) 10056 ? (FreeRegs >= SizeInRegs ? ABIArgInfo::getExtendInReg(Ty) 10057 : ABIArgInfo::getExtend(Ty)) 10058 : (FreeRegs >= SizeInRegs ? 
ABIArgInfo::getDirectInReg() 10059 : ABIArgInfo::getDirect()); 10060 } 10061 10062 ABIArgInfo ARCABIInfo::classifyReturnType(QualType RetTy) const { 10063 if (RetTy->isAnyComplexType()) 10064 return ABIArgInfo::getDirectInReg(); 10065 10066 // Arguments of size > 4 registers are indirect. 10067 auto RetSize = llvm::alignTo(getContext().getTypeSize(RetTy), 32) / 32; 10068 if (RetSize > 4) 10069 return getIndirectByRef(RetTy, /*HasFreeRegs*/ true); 10070 10071 return DefaultABIInfo::classifyReturnType(RetTy); 10072 } 10073 10074 } // End anonymous namespace. 10075 10076 //===----------------------------------------------------------------------===// 10077 // XCore ABI Implementation 10078 //===----------------------------------------------------------------------===// 10079 10080 namespace { 10081 10082 /// A SmallStringEnc instance is used to build up the TypeString by passing 10083 /// it by reference between functions that append to it. 10084 typedef llvm::SmallString<128> SmallStringEnc; 10085 10086 /// TypeStringCache caches the meta encodings of Types. 10087 /// 10088 /// The reason for caching TypeStrings is two fold: 10089 /// 1. To cache a type's encoding for later uses; 10090 /// 2. As a means to break recursive member type inclusion. 10091 /// 10092 /// A cache Entry can have a Status of: 10093 /// NonRecursive: The type encoding is not recursive; 10094 /// Recursive: The type encoding is recursive; 10095 /// Incomplete: An incomplete TypeString; 10096 /// IncompleteUsed: An incomplete TypeString that has been used in a 10097 /// Recursive type encoding. 10098 /// 10099 /// A NonRecursive entry will have all of its sub-members expanded as fully 10100 /// as possible. Whilst it may contain types which are recursive, the type 10101 /// itself is not recursive and thus its encoding may be safely used whenever 10102 /// the type is encountered. 10103 /// 10104 /// A Recursive entry will have all of its sub-members expanded as fully as 10105 /// possible. The type itself is recursive and it may contain other types which 10106 /// are recursive. The Recursive encoding must not be used during the expansion 10107 /// of a recursive type's recursive branch. For simplicity the code uses 10108 /// IncompleteCount to reject all usage of Recursive encodings for member types. 10109 /// 10110 /// An Incomplete entry is always a RecordType and only encodes its 10111 /// identifier e.g. "s(S){}". Incomplete 'StubEnc' entries are ephemeral and 10112 /// are placed into the cache during type expansion as a means to identify and 10113 /// handle recursive inclusion of types as sub-members. If there is recursion 10114 /// the entry becomes IncompleteUsed. 10115 /// 10116 /// During the expansion of a RecordType's members: 10117 /// 10118 /// If the cache contains a NonRecursive encoding for the member type, the 10119 /// cached encoding is used; 10120 /// 10121 /// If the cache contains a Recursive encoding for the member type, the 10122 /// cached encoding is 'Swapped' out, as it may be incorrect, and... 10123 /// 10124 /// If the member is a RecordType, an Incomplete encoding is placed into the 10125 /// cache to break potential recursive inclusion of itself as a sub-member; 10126 /// 10127 /// Once a member RecordType has been expanded, its temporary incomplete 10128 /// entry is removed from the cache. 
If a Recursive encoding was swapped out
10129 /// it is swapped back in;
10130 ///
10131 /// If an incomplete entry is used to expand a sub-member, the incomplete
10132 /// entry is marked as IncompleteUsed. The cache keeps count of how many
10133 /// IncompleteUsed entries it currently contains in IncompleteUsedCount;
10134 ///
10135 /// If a member's encoding is found to be NonRecursive or Recursive, viz:
10136 /// IncompleteUsedCount==0, the member's encoding is added to the cache.
10137 /// Else the member is part of a recursive type and thus the recursion has
10138 /// been exited too soon for the encoding to be correct for the member.
10139 ///
10140 class TypeStringCache {
10141 enum Status {NonRecursive, Recursive, Incomplete, IncompleteUsed};
10142 struct Entry {
10143 std::string Str; // The encoded TypeString for the type.
10144 enum Status State; // Information about the encoding in 'Str'.
10145 std::string Swapped; // A temporary placeholder for a Recursive encoding
10146 // during the expansion of RecordType's members.
10147 };
10148 std::map<const IdentifierInfo *, struct Entry> Map;
10149 unsigned IncompleteCount; // Number of Incomplete entries in the Map.
10150 unsigned IncompleteUsedCount; // Number of IncompleteUsed entries in the Map.
10151 public:
10152 TypeStringCache() : IncompleteCount(0), IncompleteUsedCount(0) {}
10153 void addIncomplete(const IdentifierInfo *ID, std::string StubEnc);
10154 bool removeIncomplete(const IdentifierInfo *ID);
10155 void addIfComplete(const IdentifierInfo *ID, StringRef Str,
10156 bool IsRecursive);
10157 StringRef lookupStr(const IdentifierInfo *ID);
10158 };
10159
10160 /// TypeString encodings for enum & union fields must be ordered.
10161 /// FieldEncoding is a helper for this ordering process.
10162 class FieldEncoding {
10163 bool HasName;
10164 std::string Enc;
10165 public:
10166 FieldEncoding(bool b, SmallStringEnc &e) : HasName(b), Enc(e.c_str()) {}
10167 StringRef str() { return Enc; }
10168 bool operator<(const FieldEncoding &rhs) const {
10169 if (HasName != rhs.HasName) return HasName;
10170 return Enc < rhs.Enc;
10171 }
10172 };
10173
10174 class XCoreABIInfo : public DefaultABIInfo {
10175 public:
10176 XCoreABIInfo(CodeGen::CodeGenTypes &CGT) : DefaultABIInfo(CGT) {}
10177 Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
10178 QualType Ty) const override;
10179 };
10180
10181 class XCoreTargetCodeGenInfo : public TargetCodeGenInfo {
10182 mutable TypeStringCache TSC;
10183 void emitTargetMD(const Decl *D, llvm::GlobalValue *GV,
10184 const CodeGen::CodeGenModule &M) const;
10185
10186 public:
10187 XCoreTargetCodeGenInfo(CodeGenTypes &CGT)
10188 : TargetCodeGenInfo(std::make_unique<XCoreABIInfo>(CGT)) {}
10189 void emitTargetMetadata(CodeGen::CodeGenModule &CGM,
10190 const llvm::MapVector<GlobalDecl, StringRef>
10191 &MangledDeclNames) const override;
10192 };
10193
10194 } // End anonymous namespace.
10195
10196 // TODO: this implementation is likely now redundant with the default
10197 // EmitVAArg.
10198 Address XCoreABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
10199 QualType Ty) const {
10200 CGBuilderTy &Builder = CGF.Builder;
10201
10202 // Get the VAList.
10203 CharUnits SlotSize = CharUnits::fromQuantity(4);
10204 Address AP = Address(Builder.CreateLoad(VAListAddr),
10205 getVAListElementType(CGF), SlotSize);
10206
10207 // Handle the argument.
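// Each case below computes both the address of the argument value and the
// number of bytes (ArgSize) by which to advance the 4-byte-slot va_list.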
10208 ABIArgInfo AI = classifyArgumentType(Ty);
10209 CharUnits TypeAlign = getContext().getTypeAlignInChars(Ty);
10210 llvm::Type *ArgTy = CGT.ConvertType(Ty);
10211 if (AI.canHaveCoerceToType() && !AI.getCoerceToType())
10212 AI.setCoerceToType(ArgTy);
10213 llvm::Type *ArgPtrTy = llvm::PointerType::getUnqual(ArgTy);
10214
10215 Address Val = Address::invalid();
10216 CharUnits ArgSize = CharUnits::Zero();
10217 switch (AI.getKind()) {
10218 case ABIArgInfo::Expand:
10219 case ABIArgInfo::CoerceAndExpand:
10220 case ABIArgInfo::InAlloca:
10221 llvm_unreachable("Unsupported ABI kind for va_arg");
10222 case ABIArgInfo::Ignore:
10223 Val = Address(llvm::UndefValue::get(ArgPtrTy), ArgTy, TypeAlign);
10224 ArgSize = CharUnits::Zero();
10225 break;
10226 case ABIArgInfo::Extend:
10227 case ABIArgInfo::Direct:
10228 Val = Builder.CreateElementBitCast(AP, ArgTy);
10229 ArgSize = CharUnits::fromQuantity(
10230 getDataLayout().getTypeAllocSize(AI.getCoerceToType()));
10231 ArgSize = ArgSize.alignTo(SlotSize);
10232 break;
10233 case ABIArgInfo::Indirect:
10234 case ABIArgInfo::IndirectAliased:
10235 Val = Builder.CreateElementBitCast(AP, ArgPtrTy);
10236 Val = Address(Builder.CreateLoad(Val), ArgTy, TypeAlign);
10237 ArgSize = SlotSize;
10238 break;
10239 }
10240
10241 // Increment the VAList.
10242 if (!ArgSize.isZero()) {
10243 Address APN = Builder.CreateConstInBoundsByteGEP(AP, ArgSize);
10244 Builder.CreateStore(APN.getPointer(), VAListAddr);
10245 }
10246
10247 return Val;
10248 }
10249
10250 /// During the expansion of a RecordType, an incomplete TypeString is placed
10251 /// into the cache as a means to identify and break recursion.
10252 /// If there is a Recursive encoding in the cache, it is swapped out and will
10253 /// be reinserted by removeIncomplete().
10254 /// All other types of encoding should have been used rather than arriving here.
10255 void TypeStringCache::addIncomplete(const IdentifierInfo *ID,
10256 std::string StubEnc) {
10257 if (!ID)
10258 return;
10259 Entry &E = Map[ID];
10260 assert( (E.Str.empty() || E.State == Recursive) &&
10261 "Incorrect use of addIncomplete");
10262 assert(!StubEnc.empty() && "Passing an empty string to addIncomplete()");
10263 E.Swapped.swap(E.Str); // swap out the Recursive
10264 E.Str.swap(StubEnc);
10265 E.State = Incomplete;
10266 ++IncompleteCount;
10267 }
10268
10269 /// Once the RecordType has been expanded, the temporary incomplete TypeString
10270 /// must be removed from the cache.
10271 /// If a Recursive was swapped out by addIncomplete(), it will be replaced.
10272 /// Returns true if the RecordType was defined recursively.
10273 bool TypeStringCache::removeIncomplete(const IdentifierInfo *ID) {
10274 if (!ID)
10275 return false;
10276 auto I = Map.find(ID);
10277 assert(I != Map.end() && "Entry not present");
10278 Entry &E = I->second;
10279 assert( (E.State == Incomplete ||
10280 E.State == IncompleteUsed) &&
10281 "Entry must be an incomplete type");
10282 bool IsRecursive = false;
10283 if (E.State == IncompleteUsed) {
10284 // We made use of our Incomplete encoding, thus we are recursive.
10285 IsRecursive = true;
10286 --IncompleteUsedCount;
10287 }
10288 if (E.Swapped.empty())
10289 Map.erase(I);
10290 else {
10291 // Swap the Recursive back.
10292 E.Swapped.swap(E.Str);
10293 E.Swapped.clear();
10294 E.State = Recursive;
10295 }
10296 --IncompleteCount;
10297 return IsRecursive;
10298 }
10299
10300 /// Add the encoded TypeString to the cache only if it is NonRecursive or
10301 /// Recursive (viz: all sub-members were expanded as fully as possible).
10302 void TypeStringCache::addIfComplete(const IdentifierInfo *ID, StringRef Str,
10303 bool IsRecursive) {
10304 if (!ID || IncompleteUsedCount)
10305 return; // No key or it is an incomplete sub-type so don't add.
10306 Entry &E = Map[ID];
10307 if (IsRecursive && !E.Str.empty()) {
10308 assert(E.State==Recursive && E.Str.size() == Str.size() &&
10309 "This is not the same Recursive entry");
10310 // The parent container was not recursive after all, so we could have used
10311 // this Recursive sub-member entry, but we assumed the worst when we
10312 // started, viz: IncompleteCount!=0.
10313 return;
10314 }
10315 assert(E.Str.empty() && "Entry already present");
10316 E.Str = Str.str();
10317 E.State = IsRecursive? Recursive : NonRecursive;
10318 }
10319
10320 /// Return a cached TypeString encoding for the ID. If there isn't one, or we
10321 /// are recursively expanding a type (IncompleteCount != 0) and the cached
10322 /// encoding is Recursive, return an empty StringRef.
10323 StringRef TypeStringCache::lookupStr(const IdentifierInfo *ID) {
10324 if (!ID)
10325 return StringRef(); // We have no key.
10326 auto I = Map.find(ID);
10327 if (I == Map.end())
10328 return StringRef(); // We have no encoding.
10329 Entry &E = I->second;
10330 if (E.State == Recursive && IncompleteCount)
10331 return StringRef(); // We don't use Recursive encodings for member types.
10332
10333 if (E.State == Incomplete) {
10334 // The incomplete type is being used to break out of recursion.
10335 E.State = IncompleteUsed;
10336 ++IncompleteUsedCount;
10337 }
10338 return E.Str;
10339 }
10340
10341 /// The XCore ABI includes a type information section that communicates symbol
10342 /// type information to the linker. The linker uses this information to verify
10343 /// safety/correctness of things such as array bounds and pointers et al.
10344 /// The ABI only requires C (and XC) language modules to emit TypeStrings.
10345 /// This type information (TypeString) is emitted into metadata for all global
10346 /// symbols: definitions, declarations, functions & variables.
10347 ///
10348 /// The TypeString carries type, qualifier, name, size & value details.
10349 /// Please see 'Tools Development Guide' section 2.16.2 for format details:
10350 /// https://www.xmos.com/download/public/Tools-Development-Guide%28X9114A%29.pdf
10351 /// The output is tested by test/CodeGen/xcore-stringtype.c.
10352 ///
10353 static bool getTypeString(SmallStringEnc &Enc, const Decl *D,
10354 const CodeGen::CodeGenModule &CGM,
10355 TypeStringCache &TSC);
10356
10357 /// XCore uses emitTargetMD to emit TypeString metadata for global symbols.
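/// For example, a C global `int gi;` would be paired with the encoding "si"
/// in the !xcore.typestrings named metadata (a sketch based on the builtin
/// encodings defined later in this file).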
10358 void XCoreTargetCodeGenInfo::emitTargetMD( 10359 const Decl *D, llvm::GlobalValue *GV, 10360 const CodeGen::CodeGenModule &CGM) const { 10361 SmallStringEnc Enc; 10362 if (getTypeString(Enc, D, CGM, TSC)) { 10363 llvm::LLVMContext &Ctx = CGM.getModule().getContext(); 10364 llvm::Metadata *MDVals[] = {llvm::ConstantAsMetadata::get(GV), 10365 llvm::MDString::get(Ctx, Enc.str())}; 10366 llvm::NamedMDNode *MD = 10367 CGM.getModule().getOrInsertNamedMetadata("xcore.typestrings"); 10368 MD->addOperand(llvm::MDNode::get(Ctx, MDVals)); 10369 } 10370 } 10371 10372 void XCoreTargetCodeGenInfo::emitTargetMetadata( 10373 CodeGen::CodeGenModule &CGM, 10374 const llvm::MapVector<GlobalDecl, StringRef> &MangledDeclNames) const { 10375 // Warning, new MangledDeclNames may be appended within this loop. 10376 // We rely on MapVector insertions adding new elements to the end 10377 // of the container. 10378 for (unsigned I = 0; I != MangledDeclNames.size(); ++I) { 10379 auto Val = *(MangledDeclNames.begin() + I); 10380 llvm::GlobalValue *GV = CGM.GetGlobalValue(Val.second); 10381 if (GV) { 10382 const Decl *D = Val.first.getDecl()->getMostRecentDecl(); 10383 emitTargetMD(D, GV, CGM); 10384 } 10385 } 10386 } 10387 10388 //===----------------------------------------------------------------------===// 10389 // Base ABI and target codegen info implementation common between SPIR and 10390 // SPIR-V. 10391 //===----------------------------------------------------------------------===// 10392 10393 namespace { 10394 class CommonSPIRABIInfo : public DefaultABIInfo { 10395 public: 10396 CommonSPIRABIInfo(CodeGenTypes &CGT) : DefaultABIInfo(CGT) { setCCs(); } 10397 10398 private: 10399 void setCCs(); 10400 }; 10401 10402 class SPIRVABIInfo : public CommonSPIRABIInfo { 10403 public: 10404 SPIRVABIInfo(CodeGenTypes &CGT) : CommonSPIRABIInfo(CGT) {} 10405 void computeInfo(CGFunctionInfo &FI) const override; 10406 10407 private: 10408 ABIArgInfo classifyKernelArgumentType(QualType Ty) const; 10409 }; 10410 } // end anonymous namespace 10411 namespace { 10412 class CommonSPIRTargetCodeGenInfo : public TargetCodeGenInfo { 10413 public: 10414 CommonSPIRTargetCodeGenInfo(CodeGen::CodeGenTypes &CGT) 10415 : TargetCodeGenInfo(std::make_unique<CommonSPIRABIInfo>(CGT)) {} 10416 CommonSPIRTargetCodeGenInfo(std::unique_ptr<ABIInfo> ABIInfo) 10417 : TargetCodeGenInfo(std::move(ABIInfo)) {} 10418 10419 LangAS getASTAllocaAddressSpace() const override { 10420 return getLangASFromTargetAS( 10421 getABIInfo().getDataLayout().getAllocaAddrSpace()); 10422 } 10423 10424 unsigned getOpenCLKernelCallingConv() const override; 10425 }; 10426 class SPIRVTargetCodeGenInfo : public CommonSPIRTargetCodeGenInfo { 10427 public: 10428 SPIRVTargetCodeGenInfo(CodeGen::CodeGenTypes &CGT) 10429 : CommonSPIRTargetCodeGenInfo(std::make_unique<SPIRVABIInfo>(CGT)) {} 10430 void setCUDAKernelCallingConvention(const FunctionType *&FT) const override; 10431 }; 10432 } // End anonymous namespace. 10433 10434 void CommonSPIRABIInfo::setCCs() { 10435 assert(getRuntimeCC() == llvm::CallingConv::C); 10436 RuntimeCC = llvm::CallingConv::SPIR_FUNC; 10437 } 10438 10439 ABIArgInfo SPIRVABIInfo::classifyKernelArgumentType(QualType Ty) const { 10440 if (getContext().getLangOpts().CUDAIsDevice) { 10441 // Coerce pointer arguments with default address space to CrossWorkGroup 10442 // pointers for HIPSPV/CUDASPV. When the language mode is HIP/CUDA, the 10443 // SPIRTargetInfo maps cuda_device to SPIR-V's CrossWorkGroup address space. 
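// E.g. a `float *` kernel parameter in the default (generic) address space
// is rewritten to a CrossWorkGroup pointer, i.e. addrspace(1) in the SPIR-V
// data layout (illustrative).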
10444 llvm::Type *LTy = CGT.ConvertType(Ty); 10445 auto DefaultAS = getContext().getTargetAddressSpace(LangAS::Default); 10446 auto GlobalAS = getContext().getTargetAddressSpace(LangAS::cuda_device); 10447 auto *PtrTy = llvm::dyn_cast<llvm::PointerType>(LTy); 10448 if (PtrTy && PtrTy->getAddressSpace() == DefaultAS) { 10449 LTy = llvm::PointerType::getWithSamePointeeType(PtrTy, GlobalAS); 10450 return ABIArgInfo::getDirect(LTy, 0, nullptr, false); 10451 } 10452 10453 // Force copying aggregate type in kernel arguments by value when 10454 // compiling CUDA targeting SPIR-V. This is required for the object 10455 // copied to be valid on the device. 10456 // This behavior follows the CUDA spec 10457 // https://docs.nvidia.com/cuda/cuda-c-programming-guide/index.html#global-function-argument-processing, 10458 // and matches the NVPTX implementation. 10459 if (isAggregateTypeForABI(Ty)) 10460 return getNaturalAlignIndirect(Ty, /* byval */ true); 10461 } 10462 return classifyArgumentType(Ty); 10463 } 10464 10465 void SPIRVABIInfo::computeInfo(CGFunctionInfo &FI) const { 10466 // The logic is same as in DefaultABIInfo with an exception on the kernel 10467 // arguments handling. 10468 llvm::CallingConv::ID CC = FI.getCallingConvention(); 10469 10470 if (!getCXXABI().classifyReturnType(FI)) 10471 FI.getReturnInfo() = classifyReturnType(FI.getReturnType()); 10472 10473 for (auto &I : FI.arguments()) { 10474 if (CC == llvm::CallingConv::SPIR_KERNEL) { 10475 I.info = classifyKernelArgumentType(I.type); 10476 } else { 10477 I.info = classifyArgumentType(I.type); 10478 } 10479 } 10480 } 10481 10482 namespace clang { 10483 namespace CodeGen { 10484 void computeSPIRKernelABIInfo(CodeGenModule &CGM, CGFunctionInfo &FI) { 10485 if (CGM.getTarget().getTriple().isSPIRV()) 10486 SPIRVABIInfo(CGM.getTypes()).computeInfo(FI); 10487 else 10488 CommonSPIRABIInfo(CGM.getTypes()).computeInfo(FI); 10489 } 10490 } 10491 } 10492 10493 unsigned CommonSPIRTargetCodeGenInfo::getOpenCLKernelCallingConv() const { 10494 return llvm::CallingConv::SPIR_KERNEL; 10495 } 10496 10497 void SPIRVTargetCodeGenInfo::setCUDAKernelCallingConvention( 10498 const FunctionType *&FT) const { 10499 // Convert HIP kernels to SPIR-V kernels. 10500 if (getABIInfo().getContext().getLangOpts().HIP) { 10501 FT = getABIInfo().getContext().adjustFunctionType( 10502 FT, FT->getExtInfo().withCallingConv(CC_OpenCLKernel)); 10503 return; 10504 } 10505 } 10506 10507 static bool appendType(SmallStringEnc &Enc, QualType QType, 10508 const CodeGen::CodeGenModule &CGM, 10509 TypeStringCache &TSC); 10510 10511 /// Helper function for appendRecordType(). 10512 /// Builds a SmallVector containing the encoded field types in declaration 10513 /// order. 10514 static bool extractFieldType(SmallVectorImpl<FieldEncoding> &FE, 10515 const RecordDecl *RD, 10516 const CodeGen::CodeGenModule &CGM, 10517 TypeStringCache &TSC) { 10518 for (const auto *Field : RD->fields()) { 10519 SmallStringEnc Enc; 10520 Enc += "m("; 10521 Enc += Field->getName(); 10522 Enc += "){"; 10523 if (Field->isBitField()) { 10524 Enc += "b("; 10525 llvm::raw_svector_ostream OS(Enc); 10526 OS << Field->getBitWidthValue(CGM.getContext()); 10527 Enc += ':'; 10528 } 10529 if (!appendType(Enc, Field->getType(), CGM, TSC)) 10530 return false; 10531 if (Field->isBitField()) 10532 Enc += ')'; 10533 Enc += '}'; 10534 FE.emplace_back(!Field->getName().empty(), Enc); 10535 } 10536 return true; 10537 } 10538 10539 /// Appends structure and union types to Enc and adds encoding to cache. 
10540 /// Recursively calls appendType (via extractFieldType) for each field. 10541 /// Union types have their fields ordered according to the ABI. 10542 static bool appendRecordType(SmallStringEnc &Enc, const RecordType *RT, 10543 const CodeGen::CodeGenModule &CGM, 10544 TypeStringCache &TSC, const IdentifierInfo *ID) { 10545 // Append the cached TypeString if we have one. 10546 StringRef TypeString = TSC.lookupStr(ID); 10547 if (!TypeString.empty()) { 10548 Enc += TypeString; 10549 return true; 10550 } 10551 10552 // Start to emit an incomplete TypeString. 10553 size_t Start = Enc.size(); 10554 Enc += (RT->isUnionType()? 'u' : 's'); 10555 Enc += '('; 10556 if (ID) 10557 Enc += ID->getName(); 10558 Enc += "){"; 10559 10560 // We collect all encoded fields and order as necessary. 10561 bool IsRecursive = false; 10562 const RecordDecl *RD = RT->getDecl()->getDefinition(); 10563 if (RD && !RD->field_empty()) { 10564 // An incomplete TypeString stub is placed in the cache for this RecordType 10565 // so that recursive calls to this RecordType will use it whilst building a 10566 // complete TypeString for this RecordType. 10567 SmallVector<FieldEncoding, 16> FE; 10568 std::string StubEnc(Enc.substr(Start).str()); 10569 StubEnc += '}'; // StubEnc now holds a valid incomplete TypeString. 10570 TSC.addIncomplete(ID, std::move(StubEnc)); 10571 if (!extractFieldType(FE, RD, CGM, TSC)) { 10572 (void) TSC.removeIncomplete(ID); 10573 return false; 10574 } 10575 IsRecursive = TSC.removeIncomplete(ID); 10576 // The ABI requires unions to be sorted but not structures. 10577 // See FieldEncoding::operator< for sort algorithm. 10578 if (RT->isUnionType()) 10579 llvm::sort(FE); 10580 // We can now complete the TypeString. 10581 unsigned E = FE.size(); 10582 for (unsigned I = 0; I != E; ++I) { 10583 if (I) 10584 Enc += ','; 10585 Enc += FE[I].str(); 10586 } 10587 } 10588 Enc += '}'; 10589 TSC.addIfComplete(ID, Enc.substr(Start), IsRecursive); 10590 return true; 10591 } 10592 10593 /// Appends enum types to Enc and adds the encoding to the cache. 10594 static bool appendEnumType(SmallStringEnc &Enc, const EnumType *ET, 10595 TypeStringCache &TSC, 10596 const IdentifierInfo *ID) { 10597 // Append the cached TypeString if we have one. 10598 StringRef TypeString = TSC.lookupStr(ID); 10599 if (!TypeString.empty()) { 10600 Enc += TypeString; 10601 return true; 10602 } 10603 10604 size_t Start = Enc.size(); 10605 Enc += "e("; 10606 if (ID) 10607 Enc += ID->getName(); 10608 Enc += "){"; 10609 10610 // We collect all encoded enumerations and order them alphanumerically. 10611 if (const EnumDecl *ED = ET->getDecl()->getDefinition()) { 10612 SmallVector<FieldEncoding, 16> FE; 10613 for (auto I = ED->enumerator_begin(), E = ED->enumerator_end(); I != E; 10614 ++I) { 10615 SmallStringEnc EnumEnc; 10616 EnumEnc += "m("; 10617 EnumEnc += I->getName(); 10618 EnumEnc += "){"; 10619 I->getInitVal().toString(EnumEnc); 10620 EnumEnc += '}'; 10621 FE.push_back(FieldEncoding(!I->getName().empty(), EnumEnc)); 10622 } 10623 llvm::sort(FE); 10624 unsigned E = FE.size(); 10625 for (unsigned I = 0; I != E; ++I) { 10626 if (I) 10627 Enc += ','; 10628 Enc += FE[I].str(); 10629 } 10630 } 10631 Enc += '}'; 10632 TSC.addIfComplete(ID, Enc.substr(Start), false); 10633 return true; 10634 } 10635 10636 /// Appends type's qualifier to Enc. 10637 /// This is done prior to appending the type's encoding. 10638 static void appendQualifier(SmallStringEnc &Enc, QualType QT) { 10639 // Qualifiers are emitted in alphabetical order. 
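// E.g. `const volatile int` gets the prefix "cv:" (Lookup == 1 + 4 == 5).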
10640 static const char *const Table[]={"","c:","r:","cr:","v:","cv:","rv:","crv:"}; 10641 int Lookup = 0; 10642 if (QT.isConstQualified()) 10643 Lookup += 1<<0; 10644 if (QT.isRestrictQualified()) 10645 Lookup += 1<<1; 10646 if (QT.isVolatileQualified()) 10647 Lookup += 1<<2; 10648 Enc += Table[Lookup]; 10649 } 10650 10651 /// Appends built-in types to Enc. 10652 static bool appendBuiltinType(SmallStringEnc &Enc, const BuiltinType *BT) { 10653 const char *EncType; 10654 switch (BT->getKind()) { 10655 case BuiltinType::Void: 10656 EncType = "0"; 10657 break; 10658 case BuiltinType::Bool: 10659 EncType = "b"; 10660 break; 10661 case BuiltinType::Char_U: 10662 EncType = "uc"; 10663 break; 10664 case BuiltinType::UChar: 10665 EncType = "uc"; 10666 break; 10667 case BuiltinType::SChar: 10668 EncType = "sc"; 10669 break; 10670 case BuiltinType::UShort: 10671 EncType = "us"; 10672 break; 10673 case BuiltinType::Short: 10674 EncType = "ss"; 10675 break; 10676 case BuiltinType::UInt: 10677 EncType = "ui"; 10678 break; 10679 case BuiltinType::Int: 10680 EncType = "si"; 10681 break; 10682 case BuiltinType::ULong: 10683 EncType = "ul"; 10684 break; 10685 case BuiltinType::Long: 10686 EncType = "sl"; 10687 break; 10688 case BuiltinType::ULongLong: 10689 EncType = "ull"; 10690 break; 10691 case BuiltinType::LongLong: 10692 EncType = "sll"; 10693 break; 10694 case BuiltinType::Float: 10695 EncType = "ft"; 10696 break; 10697 case BuiltinType::Double: 10698 EncType = "d"; 10699 break; 10700 case BuiltinType::LongDouble: 10701 EncType = "ld"; 10702 break; 10703 default: 10704 return false; 10705 } 10706 Enc += EncType; 10707 return true; 10708 } 10709 10710 /// Appends a pointer encoding to Enc before calling appendType for the pointee. 10711 static bool appendPointerType(SmallStringEnc &Enc, const PointerType *PT, 10712 const CodeGen::CodeGenModule &CGM, 10713 TypeStringCache &TSC) { 10714 Enc += "p("; 10715 if (!appendType(Enc, PT->getPointeeType(), CGM, TSC)) 10716 return false; 10717 Enc += ')'; 10718 return true; 10719 } 10720 10721 /// Appends array encoding to Enc before calling appendType for the element. 10722 static bool appendArrayType(SmallStringEnc &Enc, QualType QT, 10723 const ArrayType *AT, 10724 const CodeGen::CodeGenModule &CGM, 10725 TypeStringCache &TSC, StringRef NoSizeEnc) { 10726 if (AT->getSizeModifier() != ArrayType::Normal) 10727 return false; 10728 Enc += "a("; 10729 if (const ConstantArrayType *CAT = dyn_cast<ConstantArrayType>(AT)) 10730 CAT->getSize().toStringUnsigned(Enc); 10731 else 10732 Enc += NoSizeEnc; // Global arrays use "*", otherwise it is "". 10733 Enc += ':'; 10734 // The Qualifiers should be attached to the type rather than the array. 10735 appendQualifier(Enc, QT); 10736 if (!appendType(Enc, AT->getElementType(), CGM, TSC)) 10737 return false; 10738 Enc += ')'; 10739 return true; 10740 } 10741 10742 /// Appends a function encoding to Enc, calling appendType for the return type 10743 /// and the arguments. 10744 static bool appendFunctionType(SmallStringEnc &Enc, const FunctionType *FT, 10745 const CodeGen::CodeGenModule &CGM, 10746 TypeStringCache &TSC) { 10747 Enc += "f{"; 10748 if (!appendType(Enc, FT->getReturnType(), CGM, TSC)) 10749 return false; 10750 Enc += "}("; 10751 if (const FunctionProtoType *FPT = FT->getAs<FunctionProtoType>()) { 10752 // N.B. we are only interested in the adjusted param types. 
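// For example, `int f(float, ...)` would encode as "f{si}(ft,va)" and
// `int g(void)` as "f{si}(0)" (illustrative, using the builtin encodings
// above).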
    auto I = FPT->param_type_begin();
    auto E = FPT->param_type_end();
    if (I != E) {
      do {
        if (!appendType(Enc, *I, CGM, TSC))
          return false;
        ++I;
        if (I != E)
          Enc += ',';
      } while (I != E);
      if (FPT->isVariadic())
        Enc += ",va";
    } else {
      if (FPT->isVariadic())
        Enc += "va";
      else
        Enc += '0';
    }
  }
  Enc += ')';
  return true;
}

/// Handles the type's qualifiers before dispatching a call to handle specific
/// type encodings.
static bool appendType(SmallStringEnc &Enc, QualType QType,
                       const CodeGen::CodeGenModule &CGM,
                       TypeStringCache &TSC) {

  QualType QT = QType.getCanonicalType();

  if (const ArrayType *AT = QT->getAsArrayTypeUnsafe())
    // The Qualifiers should be attached to the type rather than the array.
    // Thus we don't call appendQualifier() here.
    return appendArrayType(Enc, QT, AT, CGM, TSC, "");

  appendQualifier(Enc, QT);

  if (const BuiltinType *BT = QT->getAs<BuiltinType>())
    return appendBuiltinType(Enc, BT);

  if (const PointerType *PT = QT->getAs<PointerType>())
    return appendPointerType(Enc, PT, CGM, TSC);

  if (const EnumType *ET = QT->getAs<EnumType>())
    return appendEnumType(Enc, ET, TSC, QT.getBaseTypeIdentifier());

  if (const RecordType *RT = QT->getAsStructureType())
    return appendRecordType(Enc, RT, CGM, TSC, QT.getBaseTypeIdentifier());

  if (const RecordType *RT = QT->getAsUnionType())
    return appendRecordType(Enc, RT, CGM, TSC, QT.getBaseTypeIdentifier());

  if (const FunctionType *FT = QT->getAs<FunctionType>())
    return appendFunctionType(Enc, FT, CGM, TSC);

  return false;
}

static bool getTypeString(SmallStringEnc &Enc, const Decl *D,
                          const CodeGen::CodeGenModule &CGM,
                          TypeStringCache &TSC) {
  if (!D)
    return false;

  if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(D)) {
    if (FD->getLanguageLinkage() != CLanguageLinkage)
      return false;
    return appendType(Enc, FD->getType(), CGM, TSC);
  }

  if (const VarDecl *VD = dyn_cast<VarDecl>(D)) {
    if (VD->getLanguageLinkage() != CLanguageLinkage)
      return false;
    QualType QT = VD->getType().getCanonicalType();
    if (const ArrayType *AT = QT->getAsArrayTypeUnsafe()) {
      // Global ArrayTypes are given a size of '*' if the size is unknown.
      // The Qualifiers should be attached to the type rather than the array.
      // Thus we don't call appendQualifier() here.
      return appendArrayType(Enc, QT, AT, CGM, TSC, "*");
    }
    return appendType(Enc, QT, CGM, TSC);
  }
  return false;
}

//===----------------------------------------------------------------------===//
// RISCV ABI Implementation
//===----------------------------------------------------------------------===//

namespace {
class RISCVABIInfo : public DefaultABIInfo {
private:
  // Size of the integer ('x') registers in bits.
  unsigned XLen;
  // Size of the floating point ('f') registers in bits. Note that the target
  // ISA might have a wider FLen than the selected ABI (e.g. an RV32IF target
  // with the soft-float ABI has FLen == 0).
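  // For illustration (values follow the RISC-V psABI): the 'ilp32d' ABI has
  // XLen == 32 and FLen == 64, 'lp64f' has XLen == 64 and FLen == 32, and a
  // soft-float ABI such as 'lp64' has FLen == 0.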
  unsigned FLen;
  static const int NumArgGPRs = 8;
  static const int NumArgFPRs = 8;
  bool detectFPCCEligibleStructHelper(QualType Ty, CharUnits CurOff,
                                      llvm::Type *&Field1Ty,
                                      CharUnits &Field1Off,
                                      llvm::Type *&Field2Ty,
                                      CharUnits &Field2Off) const;

public:
  RISCVABIInfo(CodeGen::CodeGenTypes &CGT, unsigned XLen, unsigned FLen)
      : DefaultABIInfo(CGT), XLen(XLen), FLen(FLen) {}

  // DefaultABIInfo's classifyReturnType and classifyArgumentType are
  // non-virtual, but computeInfo is virtual, so we override it.
  void computeInfo(CGFunctionInfo &FI) const override;

  ABIArgInfo classifyArgumentType(QualType Ty, bool IsFixed, int &ArgGPRsLeft,
                                  int &ArgFPRsLeft) const;
  ABIArgInfo classifyReturnType(QualType RetTy) const;

  Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
                    QualType Ty) const override;

  ABIArgInfo extendType(QualType Ty) const;

  bool detectFPCCEligibleStruct(QualType Ty, llvm::Type *&Field1Ty,
                                CharUnits &Field1Off, llvm::Type *&Field2Ty,
                                CharUnits &Field2Off, int &NeededArgGPRs,
                                int &NeededArgFPRs) const;
  ABIArgInfo coerceAndExpandFPCCEligibleStruct(llvm::Type *Field1Ty,
                                               CharUnits Field1Off,
                                               llvm::Type *Field2Ty,
                                               CharUnits Field2Off) const;
};
} // end anonymous namespace

void RISCVABIInfo::computeInfo(CGFunctionInfo &FI) const {
  QualType RetTy = FI.getReturnType();
  if (!getCXXABI().classifyReturnType(FI))
    FI.getReturnInfo() = classifyReturnType(RetTy);

  // IsRetIndirect is true if classifyArgumentType indicated the value should
  // be passed indirect, or if the return type is a scalar larger than 2*XLen
  // that is not a complex type with elements <= FLen. e.g. fp128 is passed
  // direct in LLVM IR, relying on the backend lowering code to rewrite the
  // argument list and pass indirectly on RV32.
  bool IsRetIndirect = FI.getReturnInfo().getKind() == ABIArgInfo::Indirect;
  if (!IsRetIndirect && RetTy->isScalarType() &&
      getContext().getTypeSize(RetTy) > (2 * XLen)) {
    if (RetTy->isComplexType() && FLen) {
      QualType EltTy = RetTy->castAs<ComplexType>()->getElementType();
      IsRetIndirect = getContext().getTypeSize(EltTy) > FLen;
    } else {
      // This is a normal scalar > 2*XLen, such as fp128 on RV32.
      IsRetIndirect = true;
    }
  }

  // We must track the number of GPRs used in order to conform to the RISC-V
  // ABI, as integer scalars passed in registers should have signext/zeroext
  // when promoted, but are anyext if passed on the stack. As GPR usage is
  // different for variadic arguments, we must also track whether we are
  // examining a vararg or not.
  int ArgGPRsLeft = IsRetIndirect ? NumArgGPRs - 1 : NumArgGPRs;
  int ArgFPRsLeft = FLen ? NumArgFPRs : 0;
  int NumFixedArgs = FI.getNumRequiredArgs();

  int ArgNum = 0;
  for (auto &ArgInfo : FI.arguments()) {
    bool IsFixed = ArgNum < NumFixedArgs;
    ArgInfo.info =
        classifyArgumentType(ArgInfo.type, IsFixed, ArgGPRsLeft, ArgFPRsLeft);
    ArgNum++;
  }
}

// Returns true if the struct is a potential candidate for the floating point
// calling convention. If this function returns true, the caller is
// responsible for checking that if there is only a single field then that
// field is a float.
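// Illustrative examples of the flattening performed below (not from the
// original source), assuming FLen == 64:
//   struct { float f; int i; }    -> eligible: one FPR + one GPR
//   struct { double d; float f; } -> eligible: two FPRs (fp+fp)
//   struct { double x[2]; }       -> eligible: flattens to double+double
//   struct { int a; int b; }      -> rejected: int+int pairs are not eligible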
bool RISCVABIInfo::detectFPCCEligibleStructHelper(QualType Ty, CharUnits CurOff,
                                                  llvm::Type *&Field1Ty,
                                                  CharUnits &Field1Off,
                                                  llvm::Type *&Field2Ty,
                                                  CharUnits &Field2Off) const {
  bool IsInt = Ty->isIntegralOrEnumerationType();
  bool IsFloat = Ty->isRealFloatingType();

  if (IsInt || IsFloat) {
    uint64_t Size = getContext().getTypeSize(Ty);
    if (IsInt && Size > XLen)
      return false;
    // Can't be eligible if larger than the FP registers. Half precision isn't
    // currently supported on RISC-V and the ABI hasn't been confirmed, so
    // default to the integer ABI in that case.
    if (IsFloat && (Size > FLen || Size < 32))
      return false;
    // Can't be eligible if an integer type was already found (int+int pairs
    // are not eligible).
    if (IsInt && Field1Ty && Field1Ty->isIntegerTy())
      return false;
    if (!Field1Ty) {
      Field1Ty = CGT.ConvertType(Ty);
      Field1Off = CurOff;
      return true;
    }
    if (!Field2Ty) {
      Field2Ty = CGT.ConvertType(Ty);
      Field2Off = CurOff;
      return true;
    }
    return false;
  }

  if (auto CTy = Ty->getAs<ComplexType>()) {
    if (Field1Ty)
      return false;
    QualType EltTy = CTy->getElementType();
    if (getContext().getTypeSize(EltTy) > FLen)
      return false;
    Field1Ty = CGT.ConvertType(EltTy);
    Field1Off = CurOff;
    Field2Ty = Field1Ty;
    Field2Off = Field1Off + getContext().getTypeSizeInChars(EltTy);
    return true;
  }

  if (const ConstantArrayType *ATy = getContext().getAsConstantArrayType(Ty)) {
    uint64_t ArraySize = ATy->getSize().getZExtValue();
    QualType EltTy = ATy->getElementType();
    CharUnits EltSize = getContext().getTypeSizeInChars(EltTy);
    for (uint64_t i = 0; i < ArraySize; ++i) {
      bool Ret = detectFPCCEligibleStructHelper(EltTy, CurOff, Field1Ty,
                                                Field1Off, Field2Ty, Field2Off);
      if (!Ret)
        return false;
      CurOff += EltSize;
    }
    return true;
  }

  if (const auto *RTy = Ty->getAs<RecordType>()) {
    // Structures with either a non-trivial destructor or a non-trivial
    // copy constructor are not eligible for the FP calling convention.
    if (getRecordArgABI(Ty, CGT.getCXXABI()))
      return false;
    if (isEmptyRecord(getContext(), Ty, true))
      return true;
    const RecordDecl *RD = RTy->getDecl();
    // Unions aren't eligible unless they're empty (which is caught above).
    if (RD->isUnion())
      return false;
    int ZeroWidthBitFieldCount = 0;
    for (const FieldDecl *FD : RD->fields()) {
      const ASTRecordLayout &Layout = getContext().getASTRecordLayout(RD);
      uint64_t FieldOffInBits = Layout.getFieldOffset(FD->getFieldIndex());
      QualType QTy = FD->getType();
      if (FD->isBitField()) {
        unsigned BitWidth = FD->getBitWidthValue(getContext());
        // Allow a bitfield with a type greater than XLen as long as the
        // bitwidth is XLen or less.
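        // e.g. (illustrative, not from the original source) on RV32 a
        // 'long long ll : 20' member has a 64-bit type but a 20-bit width,
        // so it is examined as an XLen-sized integer instead.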
        if (getContext().getTypeSize(QTy) > XLen && BitWidth <= XLen)
          QTy = getContext().getIntTypeForBitwidth(XLen, false);
        if (BitWidth == 0) {
          ZeroWidthBitFieldCount++;
          continue;
        }
      }

      bool Ret = detectFPCCEligibleStructHelper(
          QTy, CurOff + getContext().toCharUnitsFromBits(FieldOffInBits),
          Field1Ty, Field1Off, Field2Ty, Field2Off);
      if (!Ret)
        return false;

      // As a quirk of the ABI, zero-width bitfields aren't ignored for fp+fp
      // or int+fp structs, but are ignored for a struct with an fp field and
      // any number of zero-width bitfields.
      if (Field2Ty && ZeroWidthBitFieldCount > 0)
        return false;
    }
    return Field1Ty != nullptr;
  }

  return false;
}

// Determine if a struct is eligible for passing according to the floating
// point calling convention (i.e., when flattened it contains a single fp
// value, fp+fp, or int+fp of appropriate size). If so, NeededArgFPRs and
// NeededArgGPRs are incremented appropriately.
bool RISCVABIInfo::detectFPCCEligibleStruct(QualType Ty, llvm::Type *&Field1Ty,
                                            CharUnits &Field1Off,
                                            llvm::Type *&Field2Ty,
                                            CharUnits &Field2Off,
                                            int &NeededArgGPRs,
                                            int &NeededArgFPRs) const {
  Field1Ty = nullptr;
  Field2Ty = nullptr;
  NeededArgGPRs = 0;
  NeededArgFPRs = 0;
  bool IsCandidate = detectFPCCEligibleStructHelper(
      Ty, CharUnits::Zero(), Field1Ty, Field1Off, Field2Ty, Field2Off);
  // Not really a candidate if we have a single int but no float.
  if (Field1Ty && !Field2Ty && !Field1Ty->isFloatingPointTy())
    return false;
  if (!IsCandidate)
    return false;
  if (Field1Ty && Field1Ty->isFloatingPointTy())
    NeededArgFPRs++;
  else if (Field1Ty)
    NeededArgGPRs++;
  if (Field2Ty && Field2Ty->isFloatingPointTy())
    NeededArgFPRs++;
  else if (Field2Ty)
    NeededArgGPRs++;
  return true;
}

// Call getCoerceAndExpand for the two-element flattened struct described by
// Field1Ty, Field1Off, Field2Ty, Field2Off. This method will create an
// appropriate coerceToType and unpaddedCoerceToType.
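// For illustration (assumed, not from the original source): a
// struct { float f; double d; } flattens to Field1Ty = float at offset 0 and
// Field2Ty = double at offset 8; no explicit padding element is needed, so
// the resulting coerceToType is the unpacked LLVM struct { float, double },
// whose implicit padding after the float covers the 4-byte gap.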
ABIArgInfo RISCVABIInfo::coerceAndExpandFPCCEligibleStruct(
    llvm::Type *Field1Ty, CharUnits Field1Off, llvm::Type *Field2Ty,
    CharUnits Field2Off) const {
  SmallVector<llvm::Type *, 3> CoerceElts;
  SmallVector<llvm::Type *, 2> UnpaddedCoerceElts;
  if (!Field1Off.isZero())
    CoerceElts.push_back(llvm::ArrayType::get(
        llvm::Type::getInt8Ty(getVMContext()), Field1Off.getQuantity()));

  CoerceElts.push_back(Field1Ty);
  UnpaddedCoerceElts.push_back(Field1Ty);

  if (!Field2Ty) {
    return ABIArgInfo::getCoerceAndExpand(
        llvm::StructType::get(getVMContext(), CoerceElts, !Field1Off.isZero()),
        UnpaddedCoerceElts[0]);
  }

  CharUnits Field2Align =
      CharUnits::fromQuantity(getDataLayout().getABITypeAlignment(Field2Ty));
  CharUnits Field1End =
      Field1Off +
      CharUnits::fromQuantity(getDataLayout().getTypeStoreSize(Field1Ty));
  CharUnits Field2OffNoPadNoPack = Field1End.alignTo(Field2Align);

  CharUnits Padding = CharUnits::Zero();
  if (Field2Off > Field2OffNoPadNoPack)
    Padding = Field2Off - Field2OffNoPadNoPack;
  else if (Field2Off != Field2Align && Field2Off > Field1End)
    Padding = Field2Off - Field1End;

  bool IsPacked = !Field2Off.isMultipleOf(Field2Align);

  if (!Padding.isZero())
    CoerceElts.push_back(llvm::ArrayType::get(
        llvm::Type::getInt8Ty(getVMContext()), Padding.getQuantity()));

  CoerceElts.push_back(Field2Ty);
  UnpaddedCoerceElts.push_back(Field2Ty);

  auto CoerceToType =
      llvm::StructType::get(getVMContext(), CoerceElts, IsPacked);
  auto UnpaddedCoerceToType =
      llvm::StructType::get(getVMContext(), UnpaddedCoerceElts, IsPacked);

  return ABIArgInfo::getCoerceAndExpand(CoerceToType, UnpaddedCoerceToType);
}

ABIArgInfo RISCVABIInfo::classifyArgumentType(QualType Ty, bool IsFixed,
                                              int &ArgGPRsLeft,
                                              int &ArgFPRsLeft) const {
  assert(ArgGPRsLeft <= NumArgGPRs && "Arg GPR tracking underflow");
  Ty = useFirstFieldIfTransparentUnion(Ty);

  // Structures with either a non-trivial destructor or a non-trivial
  // copy constructor are always passed indirectly.
  if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, getCXXABI())) {
    if (ArgGPRsLeft)
      ArgGPRsLeft -= 1;
    return getNaturalAlignIndirect(Ty, /*ByVal=*/RAA ==
                                           CGCXXABI::RAA_DirectInMemory);
  }

  // Ignore empty structs/unions.
  if (isEmptyRecord(getContext(), Ty, true))
    return ABIArgInfo::getIgnore();

  uint64_t Size = getContext().getTypeSize(Ty);

  // Pass floating point values via FPRs if possible.
  if (IsFixed && Ty->isFloatingType() && !Ty->isComplexType() &&
      FLen >= Size && ArgFPRsLeft) {
    ArgFPRsLeft--;
    return ABIArgInfo::getDirect();
  }

  // Complex types for the hard float ABI must be passed directly rather than
  // using CoerceAndExpand.
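  // e.g. (illustrative) with the 'lp64d' ABI, a fixed '_Complex double'
  // argument consumes two FPRs and is passed as the direct LLVM type
  // { double, double }.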
  if (IsFixed && Ty->isComplexType() && FLen && ArgFPRsLeft >= 2) {
    QualType EltTy = Ty->castAs<ComplexType>()->getElementType();
    if (getContext().getTypeSize(EltTy) <= FLen) {
      ArgFPRsLeft -= 2;
      return ABIArgInfo::getDirect();
    }
  }

  if (IsFixed && FLen && Ty->isStructureOrClassType()) {
    llvm::Type *Field1Ty = nullptr;
    llvm::Type *Field2Ty = nullptr;
    CharUnits Field1Off = CharUnits::Zero();
    CharUnits Field2Off = CharUnits::Zero();
    int NeededArgGPRs = 0;
    int NeededArgFPRs = 0;
    bool IsCandidate =
        detectFPCCEligibleStruct(Ty, Field1Ty, Field1Off, Field2Ty, Field2Off,
                                 NeededArgGPRs, NeededArgFPRs);
    if (IsCandidate && NeededArgGPRs <= ArgGPRsLeft &&
        NeededArgFPRs <= ArgFPRsLeft) {
      ArgGPRsLeft -= NeededArgGPRs;
      ArgFPRsLeft -= NeededArgFPRs;
      return coerceAndExpandFPCCEligibleStruct(Field1Ty, Field1Off, Field2Ty,
                                               Field2Off);
    }
  }

  uint64_t NeededAlign = getContext().getTypeAlign(Ty);
  bool MustUseStack = false;
  // Determine the number of GPRs needed to pass the current argument
  // according to the ABI. 2*XLen-aligned varargs are passed in "aligned"
  // register pairs, so may consume 3 registers.
  int NeededArgGPRs = 1;
  if (!IsFixed && NeededAlign == 2 * XLen)
    NeededArgGPRs = 2 + (ArgGPRsLeft % 2);
  else if (Size > XLen && Size <= 2 * XLen)
    NeededArgGPRs = 2;

  if (NeededArgGPRs > ArgGPRsLeft) {
    MustUseStack = true;
    NeededArgGPRs = ArgGPRsLeft;
  }

  ArgGPRsLeft -= NeededArgGPRs;

  if (!isAggregateTypeForABI(Ty) && !Ty->isVectorType()) {
    // Treat an enum type as its underlying type.
    if (const EnumType *EnumTy = Ty->getAs<EnumType>())
      Ty = EnumTy->getDecl()->getIntegerType();

    // All integral types are promoted to XLen width, unless passed on the
    // stack.
    if (Size < XLen && Ty->isIntegralOrEnumerationType() && !MustUseStack) {
      return extendType(Ty);
    }

    if (const auto *EIT = Ty->getAs<BitIntType>()) {
      if (EIT->getNumBits() < XLen && !MustUseStack)
        return extendType(Ty);
      if (EIT->getNumBits() > 128 ||
          (!getContext().getTargetInfo().hasInt128Type() &&
           EIT->getNumBits() > 64))
        return getNaturalAlignIndirect(Ty, /*ByVal=*/false);
    }

    return ABIArgInfo::getDirect();
  }

  // Aggregates which are <= 2*XLen will be passed in registers if possible,
  // so coerce to integers.
  if (Size <= 2 * XLen) {
    unsigned Alignment = getContext().getTypeAlign(Ty);

    // Use a single XLen int if possible, 2*XLen if 2*XLen alignment is
    // required, and a 2-element XLen array if only XLen alignment is
    // required.
    if (Size <= XLen) {
      return ABIArgInfo::getDirect(
          llvm::IntegerType::get(getVMContext(), XLen));
    } else if (Alignment == 2 * XLen) {
      return ABIArgInfo::getDirect(
          llvm::IntegerType::get(getVMContext(), 2 * XLen));
    } else {
      return ABIArgInfo::getDirect(llvm::ArrayType::get(
          llvm::IntegerType::get(getVMContext(), XLen), 2));
    }
  }
  return getNaturalAlignIndirect(Ty, /*ByVal=*/false);
}

ABIArgInfo RISCVABIInfo::classifyReturnType(QualType RetTy) const {
  if (RetTy->isVoidType())
    return ABIArgInfo::getIgnore();

  int ArgGPRsLeft = 2;
  int ArgFPRsLeft = FLen ? 2 : 0;

  // The rules for return and argument types are the same, so defer to
  // classifyArgumentType.
  return classifyArgumentType(RetTy, /*IsFixed=*/true, ArgGPRsLeft,
                              ArgFPRsLeft);
}

Address RISCVABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
                                QualType Ty) const {
  CharUnits SlotSize = CharUnits::fromQuantity(XLen / 8);

  // Empty records are ignored for parameter passing purposes.
  if (isEmptyRecord(getContext(), Ty, true)) {
    Address Addr = Address(CGF.Builder.CreateLoad(VAListAddr),
                           getVAListElementType(CGF), SlotSize);
    Addr = CGF.Builder.CreateElementBitCast(Addr, CGF.ConvertTypeForMem(Ty));
    return Addr;
  }

  auto TInfo = getContext().getTypeInfoInChars(Ty);

  // Arguments bigger than 2*XLen bytes are passed indirectly.
  bool IsIndirect = TInfo.Width > 2 * SlotSize;

  return emitVoidPtrVAArg(CGF, VAListAddr, Ty, IsIndirect, TInfo,
                          SlotSize, /*AllowHigherAlign=*/true);
}

ABIArgInfo RISCVABIInfo::extendType(QualType Ty) const {
  int TySize = getContext().getTypeSize(Ty);
  // The RV64 ABI requires unsigned 32-bit integers to be sign extended.
  if (XLen == 64 && Ty->isUnsignedIntegerOrEnumerationType() && TySize == 32)
    return ABIArgInfo::getSignExtend(Ty);
  return ABIArgInfo::getExtend(Ty);
}

namespace {
class RISCVTargetCodeGenInfo : public TargetCodeGenInfo {
public:
  RISCVTargetCodeGenInfo(CodeGen::CodeGenTypes &CGT, unsigned XLen,
                         unsigned FLen)
      : TargetCodeGenInfo(std::make_unique<RISCVABIInfo>(CGT, XLen, FLen)) {}

  void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
                           CodeGen::CodeGenModule &CGM) const override {
    const auto *FD = dyn_cast_or_null<FunctionDecl>(D);
    if (!FD) return;

    const auto *Attr = FD->getAttr<RISCVInterruptAttr>();
    if (!Attr)
      return;

    const char *Kind;
    switch (Attr->getInterrupt()) {
    case RISCVInterruptAttr::user: Kind = "user"; break;
    case RISCVInterruptAttr::supervisor: Kind = "supervisor"; break;
    case RISCVInterruptAttr::machine: Kind = "machine"; break;
    }

    auto *Fn = cast<llvm::Function>(GV);

    Fn->addFnAttr("interrupt", Kind);
  }
};
} // namespace

//===----------------------------------------------------------------------===//
// VE ABI Implementation.
//===----------------------------------------------------------------------===//
namespace {
class VEABIInfo : public DefaultABIInfo {
public:
  VEABIInfo(CodeGenTypes &CGT) : DefaultABIInfo(CGT) {}

private:
  ABIArgInfo classifyReturnType(QualType RetTy) const;
  ABIArgInfo classifyArgumentType(QualType Ty) const;
  void computeInfo(CGFunctionInfo &FI) const override;
};
} // end anonymous namespace

ABIArgInfo VEABIInfo::classifyReturnType(QualType Ty) const {
  if (Ty->isAnyComplexType())
    return ABIArgInfo::getDirect();
  uint64_t Size = getContext().getTypeSize(Ty);
  if (Size < 64 && Ty->isIntegerType())
    return ABIArgInfo::getExtend(Ty);
  return DefaultABIInfo::classifyReturnType(Ty);
}

ABIArgInfo VEABIInfo::classifyArgumentType(QualType Ty) const {
  if (Ty->isAnyComplexType())
    return ABIArgInfo::getDirect();
  uint64_t Size = getContext().getTypeSize(Ty);
  if (Size < 64 && Ty->isIntegerType())
    return ABIArgInfo::getExtend(Ty);
  return DefaultABIInfo::classifyArgumentType(Ty);
}

void VEABIInfo::computeInfo(CGFunctionInfo &FI) const {
  FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
  for (auto &Arg : FI.arguments())
    Arg.info = classifyArgumentType(Arg.type);
}

namespace {
class VETargetCodeGenInfo : public TargetCodeGenInfo {
public:
  VETargetCodeGenInfo(CodeGenTypes &CGT)
      : TargetCodeGenInfo(std::make_unique<VEABIInfo>(CGT)) {}
  // The VE ABI requires that the arguments of variadic and prototype-less
  // functions be passed in both registers and memory.
  bool isNoProtoCallVariadic(const CallArgList &args,
                             const FunctionNoProtoType *fnType) const override {
    return true;
  }
};
} // end anonymous namespace

//===----------------------------------------------------------------------===//
// CSKY ABI Implementation
//===----------------------------------------------------------------------===//
namespace {
class CSKYABIInfo : public DefaultABIInfo {
  static const int NumArgGPRs = 4;
  static const int NumArgFPRs = 4;

  static const unsigned XLen = 32;
  unsigned FLen;

public:
  CSKYABIInfo(CodeGen::CodeGenTypes &CGT, unsigned FLen)
      : DefaultABIInfo(CGT), FLen(FLen) {}

  void computeInfo(CGFunctionInfo &FI) const override;
  ABIArgInfo classifyArgumentType(QualType Ty, int &ArgGPRsLeft,
                                  int &ArgFPRsLeft,
                                  bool isReturnType = false) const;
  ABIArgInfo classifyReturnType(QualType RetTy) const;

  Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
                    QualType Ty) const override;
};

} // end anonymous namespace

void CSKYABIInfo::computeInfo(CGFunctionInfo &FI) const {
  QualType RetTy = FI.getReturnType();
  if (!getCXXABI().classifyReturnType(FI))
    FI.getReturnInfo() = classifyReturnType(RetTy);

  bool IsRetIndirect = FI.getReturnInfo().getKind() == ABIArgInfo::Indirect;

  // We must track the number of GPRs used in order to conform to the CSKY
  // ABI, as integer scalars passed in registers should have signext/zeroext
  // when promoted.
  int ArgGPRsLeft = IsRetIndirect ? NumArgGPRs - 1 : NumArgGPRs;
  int ArgFPRsLeft = FLen ? NumArgFPRs : 0;

  for (auto &ArgInfo : FI.arguments()) {
    ArgInfo.info = classifyArgumentType(ArgInfo.type, ArgGPRsLeft, ArgFPRsLeft);
  }
}

Address CSKYABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
                               QualType Ty) const {
  CharUnits SlotSize = CharUnits::fromQuantity(XLen / 8);

  // Empty records are ignored for parameter passing purposes.
  if (isEmptyRecord(getContext(), Ty, true)) {
    Address Addr = Address(CGF.Builder.CreateLoad(VAListAddr),
                           getVAListElementType(CGF), SlotSize);
    Addr = CGF.Builder.CreateElementBitCast(Addr, CGF.ConvertTypeForMem(Ty));
    return Addr;
  }

  auto TInfo = getContext().getTypeInfoInChars(Ty);

  return emitVoidPtrVAArg(CGF, VAListAddr, Ty, false, TInfo, SlotSize,
                          /*AllowHigherAlign=*/true);
}

ABIArgInfo CSKYABIInfo::classifyArgumentType(QualType Ty, int &ArgGPRsLeft,
                                             int &ArgFPRsLeft,
                                             bool isReturnType) const {
  assert(ArgGPRsLeft <= NumArgGPRs && "Arg GPR tracking underflow");
  Ty = useFirstFieldIfTransparentUnion(Ty);

  // Structures with either a non-trivial destructor or a non-trivial
  // copy constructor are always passed indirectly.
  if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, getCXXABI())) {
    if (ArgGPRsLeft)
      ArgGPRsLeft -= 1;
    return getNaturalAlignIndirect(Ty, /*ByVal=*/RAA ==
                                           CGCXXABI::RAA_DirectInMemory);
  }

  // Ignore empty structs/unions.
  if (isEmptyRecord(getContext(), Ty, true))
    return ABIArgInfo::getIgnore();

  if (!Ty->getAsUnionType())
    if (const Type *SeltTy = isSingleElementStruct(Ty, getContext()))
      return ABIArgInfo::getDirect(CGT.ConvertType(QualType(SeltTy, 0)));

  uint64_t Size = getContext().getTypeSize(Ty);
  // Pass floating point values via FPRs if possible.
  if (Ty->isFloatingType() && !Ty->isComplexType() && FLen >= Size &&
      ArgFPRsLeft) {
    ArgFPRsLeft--;
    return ABIArgInfo::getDirect();
  }

  // Complex types for the hard float ABI must be passed directly rather than
  // using CoerceAndExpand.
  if (Ty->isComplexType() && FLen && !isReturnType) {
    QualType EltTy = Ty->castAs<ComplexType>()->getElementType();
    if (getContext().getTypeSize(EltTy) <= FLen) {
      ArgFPRsLeft -= 2;
      return ABIArgInfo::getDirect();
    }
  }

  if (!isAggregateTypeForABI(Ty)) {
    // Treat an enum type as its underlying type.
    if (const EnumType *EnumTy = Ty->getAs<EnumType>())
      Ty = EnumTy->getDecl()->getIntegerType();

    // All integral types are promoted to XLen width, unless passed on the
    // stack.
    if (Size < XLen && Ty->isIntegralOrEnumerationType())
      return ABIArgInfo::getExtend(Ty);

    if (const auto *EIT = Ty->getAs<BitIntType>()) {
      if (EIT->getNumBits() < XLen)
        return ABIArgInfo::getExtend(Ty);
    }

    return ABIArgInfo::getDirect();
  }

  // For arguments, the first 4*XLen bits of an aggregate are passed in
  // registers and the rest is passed on the stack, so we can coerce to
  // integers directly and let the backend handle it correctly.
  // For return types, an aggregate of at most 2*XLen bits is returned in
  // registers; larger aggregates are returned indirectly.
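  // Worked example (illustrative, not from the original source): a 12-byte
  // struct has Size == 96 bits, so as an argument it coerces to [3 x i32]
  // below, while as a return value 96 > 2 * XLen means it is returned
  // indirectly.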
  if (!isReturnType || Size <= 2 * XLen) {
    if (Size <= XLen) {
      return ABIArgInfo::getDirect(
          llvm::IntegerType::get(getVMContext(), XLen));
    } else {
      return ABIArgInfo::getDirect(llvm::ArrayType::get(
          llvm::IntegerType::get(getVMContext(), XLen),
          (Size + XLen - 1) / XLen));
    }
  }
  return getNaturalAlignIndirect(Ty, /*ByVal=*/false);
}

ABIArgInfo CSKYABIInfo::classifyReturnType(QualType RetTy) const {
  if (RetTy->isVoidType())
    return ABIArgInfo::getIgnore();

  int ArgGPRsLeft = 2;
  int ArgFPRsLeft = FLen ? 1 : 0;

  // The rules for return and argument types are the same, so defer to
  // classifyArgumentType.
  return classifyArgumentType(RetTy, ArgGPRsLeft, ArgFPRsLeft, true);
}

namespace {
class CSKYTargetCodeGenInfo : public TargetCodeGenInfo {
public:
  CSKYTargetCodeGenInfo(CodeGen::CodeGenTypes &CGT, unsigned FLen)
      : TargetCodeGenInfo(std::make_unique<CSKYABIInfo>(CGT, FLen)) {}
};
} // end anonymous namespace

//===----------------------------------------------------------------------===//
// Driver code
//===----------------------------------------------------------------------===//

bool CodeGenModule::supportsCOMDAT() const {
  return getTriple().supportsCOMDAT();
}

const TargetCodeGenInfo &CodeGenModule::getTargetCodeGenInfo() {
  if (TheTargetCodeGenInfo)
    return *TheTargetCodeGenInfo;

  // Helper to set the unique_ptr while still keeping the return value.
  auto SetCGInfo = [&](TargetCodeGenInfo *P) -> const TargetCodeGenInfo & {
    this->TheTargetCodeGenInfo.reset(P);
    return *P;
  };

  const llvm::Triple &Triple = getTarget().getTriple();
  switch (Triple.getArch()) {
  default:
    return SetCGInfo(new DefaultTargetCodeGenInfo(Types));

  case llvm::Triple::le32:
    return SetCGInfo(new PNaClTargetCodeGenInfo(Types));
  case llvm::Triple::m68k:
    return SetCGInfo(new M68kTargetCodeGenInfo(Types));
  case llvm::Triple::mips:
  case llvm::Triple::mipsel:
    if (Triple.getOS() == llvm::Triple::NaCl)
      return SetCGInfo(new PNaClTargetCodeGenInfo(Types));
    return SetCGInfo(new MIPSTargetCodeGenInfo(Types, true));

  case llvm::Triple::mips64:
  case llvm::Triple::mips64el:
    return SetCGInfo(new MIPSTargetCodeGenInfo(Types, false));

  case llvm::Triple::avr: {
    // For passing parameters, R8~R25 are used on avr, and R20~R25 are used
    // on avrtiny. For passing the return value, R18~R25 are used on avr, and
    // R22~R25 are used on avrtiny.
    unsigned NPR = getTarget().getABI() == "avrtiny" ? 6 : 18;
    unsigned NRR = getTarget().getABI() == "avrtiny" ? 4 : 8;
    return SetCGInfo(new AVRTargetCodeGenInfo(Types, NPR, NRR));
  }

  case llvm::Triple::aarch64:
  case llvm::Triple::aarch64_32:
  case llvm::Triple::aarch64_be: {
    AArch64ABIInfo::ABIKind Kind = AArch64ABIInfo::AAPCS;
    if (getTarget().getABI() == "darwinpcs")
      Kind = AArch64ABIInfo::DarwinPCS;
    else if (Triple.isOSWindows())
      return SetCGInfo(
          new WindowsAArch64TargetCodeGenInfo(Types, AArch64ABIInfo::Win64));

    return SetCGInfo(new AArch64TargetCodeGenInfo(Types, Kind));
  }

  case llvm::Triple::wasm32:
  case llvm::Triple::wasm64: {
    WebAssemblyABIInfo::ABIKind Kind = WebAssemblyABIInfo::MVP;
    if (getTarget().getABI() == "experimental-mv")
      Kind = WebAssemblyABIInfo::ExperimentalMV;
    return SetCGInfo(new WebAssemblyTargetCodeGenInfo(Types, Kind));
  }

  case llvm::Triple::arm:
  case llvm::Triple::armeb:
  case llvm::Triple::thumb:
  case llvm::Triple::thumbeb: {
    if (Triple.getOS() == llvm::Triple::Win32) {
      return SetCGInfo(
          new WindowsARMTargetCodeGenInfo(Types, ARMABIInfo::AAPCS_VFP));
    }

    ARMABIInfo::ABIKind Kind = ARMABIInfo::AAPCS;
    StringRef ABIStr = getTarget().getABI();
    if (ABIStr == "apcs-gnu")
      Kind = ARMABIInfo::APCS;
    else if (ABIStr == "aapcs16")
      Kind = ARMABIInfo::AAPCS16_VFP;
    else if (CodeGenOpts.FloatABI == "hard" ||
             (CodeGenOpts.FloatABI != "soft" &&
              (Triple.getEnvironment() == llvm::Triple::GNUEABIHF ||
               Triple.getEnvironment() == llvm::Triple::MuslEABIHF ||
               Triple.getEnvironment() == llvm::Triple::EABIHF)))
      Kind = ARMABIInfo::AAPCS_VFP;

    return SetCGInfo(new ARMTargetCodeGenInfo(Types, Kind));
  }

  case llvm::Triple::ppc: {
    if (Triple.isOSAIX())
      return SetCGInfo(new AIXTargetCodeGenInfo(Types, /*Is64Bit=*/false));

    bool IsSoftFloat =
        CodeGenOpts.FloatABI == "soft" || getTarget().hasFeature("spe");
    bool RetSmallStructInRegABI =
        PPC32TargetCodeGenInfo::isStructReturnInRegABI(Triple, CodeGenOpts);
    return SetCGInfo(
        new PPC32TargetCodeGenInfo(Types, IsSoftFloat, RetSmallStructInRegABI));
  }
  case llvm::Triple::ppcle: {
    bool IsSoftFloat = CodeGenOpts.FloatABI == "soft";
    bool RetSmallStructInRegABI =
        PPC32TargetCodeGenInfo::isStructReturnInRegABI(Triple, CodeGenOpts);
    return SetCGInfo(
        new PPC32TargetCodeGenInfo(Types, IsSoftFloat, RetSmallStructInRegABI));
  }
  case llvm::Triple::ppc64:
    if (Triple.isOSAIX())
      return SetCGInfo(new AIXTargetCodeGenInfo(Types, /*Is64Bit=*/true));

    if (Triple.isOSBinFormatELF()) {
      PPC64_SVR4_ABIInfo::ABIKind Kind = PPC64_SVR4_ABIInfo::ELFv1;
      if (getTarget().getABI() == "elfv2")
        Kind = PPC64_SVR4_ABIInfo::ELFv2;
      bool IsSoftFloat = CodeGenOpts.FloatABI == "soft";

      return SetCGInfo(
          new PPC64_SVR4_TargetCodeGenInfo(Types, Kind, IsSoftFloat));
    }
    return SetCGInfo(new PPC64TargetCodeGenInfo(Types));
  case llvm::Triple::ppc64le: {
    assert(Triple.isOSBinFormatELF() && "PPC64 LE non-ELF not supported!");
    PPC64_SVR4_ABIInfo::ABIKind Kind = PPC64_SVR4_ABIInfo::ELFv2;
    if (getTarget().getABI() == "elfv1")
      Kind = PPC64_SVR4_ABIInfo::ELFv1;
    bool IsSoftFloat = CodeGenOpts.FloatABI == "soft";

    return SetCGInfo(
        new PPC64_SVR4_TargetCodeGenInfo(Types, Kind, IsSoftFloat));
  }
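
  // For illustration: '--target=riscv64-unknown-linux-gnu -mabi=lp64d'
  // reaches the riscv64 case below with XLen == 64 and ABIFLen == 64,
  // because the ABI string ends with 'd'.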

  case llvm::Triple::nvptx:
  case llvm::Triple::nvptx64:
    return SetCGInfo(new NVPTXTargetCodeGenInfo(Types));

  case llvm::Triple::msp430:
    return SetCGInfo(new MSP430TargetCodeGenInfo(Types));

  case llvm::Triple::riscv32:
  case llvm::Triple::riscv64: {
    StringRef ABIStr = getTarget().getABI();
    unsigned XLen = getTarget().getPointerWidth(0);
    unsigned ABIFLen = 0;
    if (ABIStr.endswith("f"))
      ABIFLen = 32;
    else if (ABIStr.endswith("d"))
      ABIFLen = 64;
    return SetCGInfo(new RISCVTargetCodeGenInfo(Types, XLen, ABIFLen));
  }

  case llvm::Triple::systemz: {
    bool SoftFloat = CodeGenOpts.FloatABI == "soft";
    bool HasVector = !SoftFloat && getTarget().getABI() == "vector";
    return SetCGInfo(new SystemZTargetCodeGenInfo(Types, HasVector, SoftFloat));
  }

  case llvm::Triple::tce:
  case llvm::Triple::tcele:
    return SetCGInfo(new TCETargetCodeGenInfo(Types));

  case llvm::Triple::x86: {
    bool IsDarwinVectorABI = Triple.isOSDarwin();
    bool RetSmallStructInRegABI =
        X86_32TargetCodeGenInfo::isStructReturnInRegABI(Triple, CodeGenOpts);
    bool IsWin32FloatStructABI = Triple.isOSWindows() && !Triple.isOSCygMing();

    if (Triple.getOS() == llvm::Triple::Win32) {
      return SetCGInfo(new WinX86_32TargetCodeGenInfo(
          Types, IsDarwinVectorABI, RetSmallStructInRegABI,
          IsWin32FloatStructABI, CodeGenOpts.NumRegisterParameters));
    } else {
      return SetCGInfo(new X86_32TargetCodeGenInfo(
          Types, IsDarwinVectorABI, RetSmallStructInRegABI,
          IsWin32FloatStructABI, CodeGenOpts.NumRegisterParameters,
          CodeGenOpts.FloatABI == "soft"));
    }
  }

  case llvm::Triple::x86_64: {
    StringRef ABI = getTarget().getABI();
    X86AVXABILevel AVXLevel = (ABI == "avx512" ? X86AVXABILevel::AVX512
                               : ABI == "avx"  ? X86AVXABILevel::AVX
                                               : X86AVXABILevel::None);

    switch (Triple.getOS()) {
    case llvm::Triple::Win32:
      return SetCGInfo(new WinX86_64TargetCodeGenInfo(Types, AVXLevel));
    default:
      return SetCGInfo(new X86_64TargetCodeGenInfo(Types, AVXLevel));
    }
  }
  case llvm::Triple::hexagon:
    return SetCGInfo(new HexagonTargetCodeGenInfo(Types));
  case llvm::Triple::lanai:
    return SetCGInfo(new LanaiTargetCodeGenInfo(Types));
  case llvm::Triple::r600:
    return SetCGInfo(new AMDGPUTargetCodeGenInfo(Types));
  case llvm::Triple::amdgcn:
    return SetCGInfo(new AMDGPUTargetCodeGenInfo(Types));
  case llvm::Triple::sparc:
    return SetCGInfo(new SparcV8TargetCodeGenInfo(Types));
  case llvm::Triple::sparcv9:
    return SetCGInfo(new SparcV9TargetCodeGenInfo(Types));
  case llvm::Triple::xcore:
    return SetCGInfo(new XCoreTargetCodeGenInfo(Types));
  case llvm::Triple::arc:
    return SetCGInfo(new ARCTargetCodeGenInfo(Types));
  case llvm::Triple::spir:
  case llvm::Triple::spir64:
    return SetCGInfo(new CommonSPIRTargetCodeGenInfo(Types));
  case llvm::Triple::spirv32:
  case llvm::Triple::spirv64:
    return SetCGInfo(new SPIRVTargetCodeGenInfo(Types));
  case llvm::Triple::ve:
    return SetCGInfo(new VETargetCodeGenInfo(Types));
  case llvm::Triple::csky: {
    bool IsSoftFloat = !getTarget().hasFeature("hard-float-abi");
    bool hasFP64 = getTarget().hasFeature("fpuv2_df") ||
                   getTarget().hasFeature("fpuv3_df");
    return SetCGInfo(new CSKYTargetCodeGenInfo(Types, IsSoftFloat ? 0
                                               : hasFP64          ? 64
                                                                  : 32));
  }
  }
}

/// Create an OpenCL kernel for an enqueued block.
///
/// The kernel has the same function type as the block invoke function. Its
/// name is the name of the block invoke function postfixed with "_kernel".
/// It simply calls the block invoke function and then returns.
llvm::Function *
TargetCodeGenInfo::createEnqueuedBlockKernel(CodeGenFunction &CGF,
                                             llvm::Function *Invoke,
                                             llvm::Type *BlockTy) const {
  auto *InvokeFT = Invoke->getFunctionType();
  auto &C = CGF.getLLVMContext();
  std::string Name = Invoke->getName().str() + "_kernel";
  auto *FT = llvm::FunctionType::get(llvm::Type::getVoidTy(C),
                                     InvokeFT->params(), false);
  auto *F = llvm::Function::Create(FT, llvm::GlobalValue::ExternalLinkage, Name,
                                   &CGF.CGM.getModule());
  auto IP = CGF.Builder.saveIP();
  auto *BB = llvm::BasicBlock::Create(C, "entry", F);
  auto &Builder = CGF.Builder;
  Builder.SetInsertPoint(BB);
  llvm::SmallVector<llvm::Value *, 2> Args(llvm::make_pointer_range(F->args()));
  llvm::CallInst *call = Builder.CreateCall(Invoke, Args);
  call->setCallingConv(Invoke->getCallingConv());
  Builder.CreateRetVoid();
  Builder.restoreIP(IP);
  return F;
}

/// Create an OpenCL kernel for an enqueued block.
///
/// The type of the first argument (the block literal) is the struct type
/// of the block literal instead of a pointer type. The first argument
/// (block literal) is passed directly by value to the kernel. The kernel
/// allocates the same type of struct on the stack, stores the block literal
/// into it, and passes its pointer to the block invoke function. The kernel
/// has the "enqueued-block" function attribute and kernel argument metadata.
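// Sketch of the result (illustrative, not from the original source): given a
// block invoke function
//   void @__foo_block_invoke(i8* %block_literal, i8 addrspace(3)* %p)
// this emits a kernel
//   void @__foo_block_invoke_kernel(<block literal struct> %block_literal,
//                                   i8 addrspace(3)* %p)
// that allocas the literal, stores the by-value argument into it, and calls
// the invoke function, carrying the metadata constructed below.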
llvm::Function *AMDGPUTargetCodeGenInfo::createEnqueuedBlockKernel(
    CodeGenFunction &CGF, llvm::Function *Invoke,
    llvm::Type *BlockTy) const {
  auto &Builder = CGF.Builder;
  auto &C = CGF.getLLVMContext();

  auto *InvokeFT = Invoke->getFunctionType();
  llvm::SmallVector<llvm::Type *, 2> ArgTys;
  llvm::SmallVector<llvm::Metadata *, 8> AddressQuals;
  llvm::SmallVector<llvm::Metadata *, 8> AccessQuals;
  llvm::SmallVector<llvm::Metadata *, 8> ArgTypeNames;
  llvm::SmallVector<llvm::Metadata *, 8> ArgBaseTypeNames;
  llvm::SmallVector<llvm::Metadata *, 8> ArgTypeQuals;
  llvm::SmallVector<llvm::Metadata *, 8> ArgNames;

  ArgTys.push_back(BlockTy);
  ArgTypeNames.push_back(llvm::MDString::get(C, "__block_literal"));
  AddressQuals.push_back(llvm::ConstantAsMetadata::get(Builder.getInt32(0)));
  ArgBaseTypeNames.push_back(llvm::MDString::get(C, "__block_literal"));
  ArgTypeQuals.push_back(llvm::MDString::get(C, ""));
  AccessQuals.push_back(llvm::MDString::get(C, "none"));
  ArgNames.push_back(llvm::MDString::get(C, "block_literal"));
  for (unsigned I = 1, E = InvokeFT->getNumParams(); I < E; ++I) {
    ArgTys.push_back(InvokeFT->getParamType(I));
    ArgTypeNames.push_back(llvm::MDString::get(C, "void*"));
    AddressQuals.push_back(llvm::ConstantAsMetadata::get(Builder.getInt32(3)));
    AccessQuals.push_back(llvm::MDString::get(C, "none"));
    ArgBaseTypeNames.push_back(llvm::MDString::get(C, "void*"));
    ArgTypeQuals.push_back(llvm::MDString::get(C, ""));
    ArgNames.push_back(
        llvm::MDString::get(C, (Twine("local_arg") + Twine(I)).str()));
  }
  std::string Name = Invoke->getName().str() + "_kernel";
  auto *FT = llvm::FunctionType::get(llvm::Type::getVoidTy(C), ArgTys, false);
  auto *F = llvm::Function::Create(FT, llvm::GlobalValue::InternalLinkage, Name,
                                   &CGF.CGM.getModule());
  F->addFnAttr("enqueued-block");
  auto IP = CGF.Builder.saveIP();
  auto *BB = llvm::BasicBlock::Create(C, "entry", F);
  Builder.SetInsertPoint(BB);
  const auto BlockAlign = CGF.CGM.getDataLayout().getPrefTypeAlign(BlockTy);
  auto *BlockPtr = Builder.CreateAlloca(BlockTy, nullptr);
  BlockPtr->setAlignment(BlockAlign);
  Builder.CreateAlignedStore(F->arg_begin(), BlockPtr, BlockAlign);
  auto *Cast = Builder.CreatePointerCast(BlockPtr, InvokeFT->getParamType(0));
  llvm::SmallVector<llvm::Value *, 2> Args;
  Args.push_back(Cast);
  for (auto I = F->arg_begin() + 1, E = F->arg_end(); I != E; ++I)
    Args.push_back(I);
  llvm::CallInst *call = Builder.CreateCall(Invoke, Args);
  call->setCallingConv(Invoke->getCallingConv());
  Builder.CreateRetVoid();
  Builder.restoreIP(IP);

  F->setMetadata("kernel_arg_addr_space", llvm::MDNode::get(C, AddressQuals));
  F->setMetadata("kernel_arg_access_qual", llvm::MDNode::get(C, AccessQuals));
  F->setMetadata("kernel_arg_type", llvm::MDNode::get(C, ArgTypeNames));
  F->setMetadata("kernel_arg_base_type",
                 llvm::MDNode::get(C, ArgBaseTypeNames));
  F->setMetadata("kernel_arg_type_qual", llvm::MDNode::get(C, ArgTypeQuals));
  if (CGF.CGM.getCodeGenOpts().EmitOpenCLArgMetadata)
    F->setMetadata("kernel_arg_name", llvm::MDNode::get(C, ArgNames));

  return F;
}