//===-- llvm/lib/Target/AMDGPU/AMDGPUCallLowering.cpp - Call lowering -----===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
///
/// \file
/// This file implements the lowering of LLVM calls to machine code calls for
/// GlobalISel.
///
//===----------------------------------------------------------------------===//

#include "AMDGPUCallLowering.h"
#include "AMDGPU.h"
#include "AMDGPULegalizerInfo.h"
#include "AMDGPUTargetMachine.h"
#include "SIMachineFunctionInfo.h"
#include "SIRegisterInfo.h"
#include "llvm/CodeGen/Analysis.h"
#include "llvm/CodeGen/FunctionLoweringInfo.h"
#include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/IR/IntrinsicsAMDGPU.h"

#define DEBUG_TYPE "amdgpu-call-lowering"

using namespace llvm;

namespace {

/// Wrapper around extendRegister to ensure we extend to a full 32-bit register.
static Register extendRegisterMin32(CallLowering::ValueHandler &Handler,
                                    Register ValVReg, const CCValAssign &VA) {
  if (VA.getLocVT().getSizeInBits() < 32) {
    // 16-bit types are reported as legal for 32-bit registers. We need to
    // extend and do a 32-bit copy to avoid the verifier complaining about it.
    return Handler.MIRBuilder.buildAnyExt(LLT::scalar(32), ValVReg).getReg(0);
  }

  return Handler.extendRegister(ValVReg, VA);
}

struct AMDGPUOutgoingValueHandler : public CallLowering::OutgoingValueHandler {
  AMDGPUOutgoingValueHandler(MachineIRBuilder &B, MachineRegisterInfo &MRI,
                             MachineInstrBuilder MIB)
      : OutgoingValueHandler(B, MRI), MIB(MIB) {}

  MachineInstrBuilder MIB;

  Register getStackAddress(uint64_t Size, int64_t Offset,
                           MachinePointerInfo &MPO,
                           ISD::ArgFlagsTy Flags) override {
    llvm_unreachable("not implemented");
  }

  void assignValueToAddress(Register ValVReg, Register Addr, LLT MemTy,
                            const MachinePointerInfo &MPO,
                            const CCValAssign &VA) override {
    llvm_unreachable("not implemented");
  }

  void assignValueToReg(Register ValVReg, Register PhysReg,
                        const CCValAssign &VA) override {
    Register ExtReg = extendRegisterMin32(*this, ValVReg, VA);

    // If this is a scalar return, insert a readfirstlane just in case the value
    // ends up in a VGPR.
    // FIXME: Assert this is a shader return.
    const SIRegisterInfo *TRI
      = static_cast<const SIRegisterInfo *>(MRI.getTargetRegisterInfo());
    if (TRI->isSGPRReg(MRI, PhysReg)) {
      LLT Ty = MRI.getType(ExtReg);
      LLT S32 = LLT::scalar(32);
      if (Ty != S32) {
        // FIXME: We should probably support readfirstlane intrinsics with all
        // legal 32-bit types.
        assert(Ty.getSizeInBits() == 32);
        if (Ty.isPointer())
          ExtReg = MIRBuilder.buildPtrToInt(S32, ExtReg).getReg(0);
        else
          ExtReg = MIRBuilder.buildBitcast(S32, ExtReg).getReg(0);
      }

      auto ToSGPR = MIRBuilder
                        .buildIntrinsic(Intrinsic::amdgcn_readfirstlane,
                                        {MRI.getType(ExtReg)})
                        .addReg(ExtReg);
      ExtReg = ToSGPR.getReg(0);
    }

    MIRBuilder.buildCopy(PhysReg, ExtReg);
    MIB.addUse(PhysReg, RegState::Implicit);
  }
};

struct AMDGPUIncomingArgHandler : public CallLowering::IncomingValueHandler {
  uint64_t StackUsed = 0;

  AMDGPUIncomingArgHandler(MachineIRBuilder &B, MachineRegisterInfo &MRI)
      : IncomingValueHandler(B, MRI) {}

  Register getStackAddress(uint64_t Size, int64_t Offset,
                           MachinePointerInfo &MPO,
                           ISD::ArgFlagsTy Flags) override {
    auto &MFI = MIRBuilder.getMF().getFrameInfo();

    // Byval is assumed to be writable memory, but other stack passed arguments
    // are not.
    const bool IsImmutable = !Flags.isByVal();
    int FI = MFI.CreateFixedObject(Size, Offset, IsImmutable);
    MPO = MachinePointerInfo::getFixedStack(MIRBuilder.getMF(), FI);
    auto AddrReg = MIRBuilder.buildFrameIndex(
        LLT::pointer(AMDGPUAS::PRIVATE_ADDRESS, 32), FI);
    StackUsed = std::max(StackUsed, Size + Offset);
    return AddrReg.getReg(0);
  }

  void assignValueToReg(Register ValVReg, Register PhysReg,
                        const CCValAssign &VA) override {
    markPhysRegUsed(PhysReg);

    if (VA.getLocVT().getSizeInBits() < 32) {
      // 16-bit types are reported as legal for 32-bit registers. We need to do
      // a 32-bit copy, and truncate to avoid the verifier complaining about it.
      auto Copy = MIRBuilder.buildCopy(LLT::scalar(32), PhysReg);

      // If we have signext/zeroext, it applies to the whole 32-bit register
      // before truncation.
      auto Extended =
          buildExtensionHint(VA, Copy.getReg(0), LLT(VA.getLocVT()));
      MIRBuilder.buildTrunc(ValVReg, Extended);
      return;
    }

    IncomingValueHandler::assignValueToReg(ValVReg, PhysReg, VA);
  }

  void assignValueToAddress(Register ValVReg, Register Addr, LLT MemTy,
                            const MachinePointerInfo &MPO,
                            const CCValAssign &VA) override {
    MachineFunction &MF = MIRBuilder.getMF();

    auto MMO = MF.getMachineMemOperand(
        MPO, MachineMemOperand::MOLoad | MachineMemOperand::MOInvariant, MemTy,
        inferAlignFromPtrInfo(MF, MPO));
    MIRBuilder.buildLoad(ValVReg, Addr, *MMO);
  }

  /// How the physical register gets marked varies between formal
  /// parameters (it's a basic-block live-in), and a call instruction
  /// (it's an implicit-def of the call instruction).
  virtual void markPhysRegUsed(unsigned PhysReg) = 0;
};

struct FormalArgHandler : public AMDGPUIncomingArgHandler {
  FormalArgHandler(MachineIRBuilder &B, MachineRegisterInfo &MRI)
      : AMDGPUIncomingArgHandler(B, MRI) {}

  void markPhysRegUsed(unsigned PhysReg) override {
    MIRBuilder.getMBB().addLiveIn(PhysReg);
  }
};

struct CallReturnHandler : public AMDGPUIncomingArgHandler {
  CallReturnHandler(MachineIRBuilder &MIRBuilder, MachineRegisterInfo &MRI,
                    MachineInstrBuilder MIB)
      : AMDGPUIncomingArgHandler(MIRBuilder, MRI), MIB(MIB) {}

  void markPhysRegUsed(unsigned PhysReg) override {
    MIB.addDef(PhysReg, RegState::Implicit);
  }

  MachineInstrBuilder MIB;
};

struct AMDGPUOutgoingArgHandler : public AMDGPUOutgoingValueHandler {
  /// For tail calls, the byte offset of the call's argument area from the
  /// callee's. Unused elsewhere.
  int FPDiff;

  // Cache the SP register vreg if we need it more than once in this call site.
  Register SPReg;

  bool IsTailCall;

  AMDGPUOutgoingArgHandler(MachineIRBuilder &MIRBuilder,
                           MachineRegisterInfo &MRI, MachineInstrBuilder MIB,
                           bool IsTailCall = false, int FPDiff = 0)
      : AMDGPUOutgoingValueHandler(MIRBuilder, MRI, MIB), FPDiff(FPDiff),
        IsTailCall(IsTailCall) {}

  Register getStackAddress(uint64_t Size, int64_t Offset,
                           MachinePointerInfo &MPO,
                           ISD::ArgFlagsTy Flags) override {
    MachineFunction &MF = MIRBuilder.getMF();
    const LLT PtrTy = LLT::pointer(AMDGPUAS::PRIVATE_ADDRESS, 32);
    const LLT S32 = LLT::scalar(32);

    if (IsTailCall) {
      Offset += FPDiff;
      int FI = MF.getFrameInfo().CreateFixedObject(Size, Offset, true);
      auto FIReg = MIRBuilder.buildFrameIndex(PtrTy, FI);
      MPO = MachinePointerInfo::getFixedStack(MF, FI);
      return FIReg.getReg(0);
    }

    const SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();

    if (!SPReg) {
      const GCNSubtarget &ST = MIRBuilder.getMF().getSubtarget<GCNSubtarget>();
      if (ST.enableFlatScratch()) {
        // The stack is accessed unswizzled, so we can use a regular copy.
        SPReg = MIRBuilder.buildCopy(PtrTy,
                                     MFI->getStackPtrOffsetReg()).getReg(0);
      } else {
        // The address we produce here, without knowing the use context, is going
        // to be interpreted as a vector address, so we need to convert to a
        // swizzled address.
        SPReg = MIRBuilder.buildInstr(AMDGPU::G_AMDGPU_WAVE_ADDRESS, {PtrTy},
                                      {MFI->getStackPtrOffsetReg()}).getReg(0);
      }
    }

    auto OffsetReg = MIRBuilder.buildConstant(S32, Offset);

    auto AddrReg = MIRBuilder.buildPtrAdd(PtrTy, SPReg, OffsetReg);
    MPO = MachinePointerInfo::getStack(MF, Offset);
    return AddrReg.getReg(0);
  }

  void assignValueToReg(Register ValVReg, Register PhysReg,
                        const CCValAssign &VA) override {
    MIB.addUse(PhysReg, RegState::Implicit);
    Register ExtReg = extendRegisterMin32(*this, ValVReg, VA);
    MIRBuilder.buildCopy(PhysReg, ExtReg);
  }

  void assignValueToAddress(Register ValVReg, Register Addr, LLT MemTy,
                            const MachinePointerInfo &MPO,
                            const CCValAssign &VA) override {
    MachineFunction &MF = MIRBuilder.getMF();
    uint64_t LocMemOffset = VA.getLocMemOffset();
    const auto &ST = MF.getSubtarget<GCNSubtarget>();

    auto MMO = MF.getMachineMemOperand(
        MPO, MachineMemOperand::MOStore, MemTy,
        commonAlignment(ST.getStackAlignment(), LocMemOffset));
    MIRBuilder.buildStore(ValVReg, Addr, *MMO);
  }

  void assignValueToAddress(const CallLowering::ArgInfo &Arg,
                            unsigned ValRegIndex, Register Addr, LLT MemTy,
                            const MachinePointerInfo &MPO,
                            const CCValAssign &VA) override {
    Register ValVReg = VA.getLocInfo() != CCValAssign::LocInfo::FPExt
                           ? extendRegister(Arg.Regs[ValRegIndex], VA)
                           : Arg.Regs[ValRegIndex];
    assignValueToAddress(ValVReg, Addr, MemTy, MPO, VA);
  }
};
}

AMDGPUCallLowering::AMDGPUCallLowering(const AMDGPUTargetLowering &TLI)
  : CallLowering(&TLI) {
}

// FIXME: Compatibility shim
static ISD::NodeType extOpcodeToISDExtOpcode(unsigned MIOpc) {
  switch (MIOpc) {
  case TargetOpcode::G_SEXT:
    return ISD::SIGN_EXTEND;
  case TargetOpcode::G_ZEXT:
    return ISD::ZERO_EXTEND;
  case TargetOpcode::G_ANYEXT:
    return ISD::ANY_EXTEND;
  default:
    llvm_unreachable("not an extend opcode");
  }
}

bool AMDGPUCallLowering::canLowerReturn(MachineFunction &MF,
                                        CallingConv::ID CallConv,
                                        SmallVectorImpl<BaseArgInfo> &Outs,
                                        bool IsVarArg) const {
  // For shaders. Vector types should be explicitly handled by CC.
  if (AMDGPU::isEntryFunctionCC(CallConv))
    return true;

  SmallVector<CCValAssign, 16> ArgLocs;
  const SITargetLowering &TLI = *getTLI<SITargetLowering>();
  CCState CCInfo(CallConv, IsVarArg, MF, ArgLocs,
                 MF.getFunction().getContext());

  return checkReturn(CCInfo, Outs, TLI.CCAssignFnForReturn(CallConv, IsVarArg));
}

/// Lower the return value for the already existing \p Ret. This assumes that
/// \p B's insertion point is correct.
bool AMDGPUCallLowering::lowerReturnVal(MachineIRBuilder &B,
                                        const Value *Val, ArrayRef<Register> VRegs,
                                        MachineInstrBuilder &Ret) const {
  if (!Val)
    return true;

  auto &MF = B.getMF();
  const auto &F = MF.getFunction();
  const DataLayout &DL = MF.getDataLayout();
  MachineRegisterInfo *MRI = B.getMRI();
  LLVMContext &Ctx = F.getContext();

  CallingConv::ID CC = F.getCallingConv();
  const SITargetLowering &TLI = *getTLI<SITargetLowering>();

  SmallVector<EVT, 8> SplitEVTs;
  ComputeValueVTs(TLI, DL, Val->getType(), SplitEVTs);
  assert(VRegs.size() == SplitEVTs.size() &&
         "For each split Type there should be exactly one VReg.");

  SmallVector<ArgInfo, 8> SplitRetInfos;

  for (unsigned i = 0; i < SplitEVTs.size(); ++i) {
    EVT VT = SplitEVTs[i];
    Register Reg = VRegs[i];
    ArgInfo RetInfo(Reg, VT.getTypeForEVT(Ctx), 0);
    setArgFlags(RetInfo, AttributeList::ReturnIndex, DL, F);

    if (VT.isScalarInteger()) {
      unsigned ExtendOp = TargetOpcode::G_ANYEXT;
      if (RetInfo.Flags[0].isSExt()) {
        assert(RetInfo.Regs.size() == 1 && "expect only simple return values");
        ExtendOp = TargetOpcode::G_SEXT;
      } else if (RetInfo.Flags[0].isZExt()) {
        assert(RetInfo.Regs.size() == 1 && "expect only simple return values");
        ExtendOp = TargetOpcode::G_ZEXT;
      }

      EVT ExtVT = TLI.getTypeForExtReturn(Ctx, VT,
                                          extOpcodeToISDExtOpcode(ExtendOp));
      if (ExtVT != VT) {
        RetInfo.Ty = ExtVT.getTypeForEVT(Ctx);
        LLT ExtTy = getLLTForType(*RetInfo.Ty, DL);
        Reg = B.buildInstr(ExtendOp, {ExtTy}, {Reg}).getReg(0);
      }
    }

    if (Reg != RetInfo.Regs[0]) {
      RetInfo.Regs[0] = Reg;
      // Reset the arg flags after modifying Reg.
      setArgFlags(RetInfo, AttributeList::ReturnIndex, DL, F);
    }

    splitToValueTypes(RetInfo, SplitRetInfos, DL, CC);
  }

  CCAssignFn *AssignFn = TLI.CCAssignFnForReturn(CC, F.isVarArg());

  OutgoingValueAssigner Assigner(AssignFn);
  AMDGPUOutgoingValueHandler RetHandler(B, *MRI, Ret);
  return determineAndHandleAssignments(RetHandler, Assigner, SplitRetInfos, B,
                                        CC, F.isVarArg());
}

bool AMDGPUCallLowering::lowerReturn(MachineIRBuilder &B, const Value *Val,
                                     ArrayRef<Register> VRegs,
                                     FunctionLoweringInfo &FLI) const {

  MachineFunction &MF = B.getMF();
  SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();
  MFI->setIfReturnsVoid(!Val);

  assert(!Val == VRegs.empty() && "Return value without a vreg");

  CallingConv::ID CC = B.getMF().getFunction().getCallingConv();
  const bool IsShader = AMDGPU::isShader(CC);
  const bool IsWaveEnd =
      (IsShader && MFI->returnsVoid()) || AMDGPU::isKernel(CC);
  if (IsWaveEnd) {
    B.buildInstr(AMDGPU::S_ENDPGM)
      .addImm(0);
    return true;
  }

  unsigned ReturnOpc =
      IsShader ? AMDGPU::SI_RETURN_TO_EPILOG : AMDGPU::SI_RETURN;
  auto Ret = B.buildInstrNoInsert(ReturnOpc);

  if (!FLI.CanLowerReturn)
    insertSRetStores(B, Val->getType(), VRegs, FLI.DemoteRegister);
  else if (!lowerReturnVal(B, Val, VRegs, Ret))
    return false;

  // TODO: Handle CalleeSavedRegsViaCopy.

  B.insertInstr(Ret);
  return true;
}

void AMDGPUCallLowering::lowerParameterPtr(Register DstReg, MachineIRBuilder &B,
                                           uint64_t Offset) const {
  MachineFunction &MF = B.getMF();
  const SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();
  MachineRegisterInfo &MRI = MF.getRegInfo();
  Register KernArgSegmentPtr =
    MFI->getPreloadedReg(AMDGPUFunctionArgInfo::KERNARG_SEGMENT_PTR);
  Register KernArgSegmentVReg = MRI.getLiveInVirtReg(KernArgSegmentPtr);

  auto OffsetReg = B.buildConstant(LLT::scalar(64), Offset);

  B.buildPtrAdd(DstReg, KernArgSegmentVReg, OffsetReg);
}

void AMDGPUCallLowering::lowerParameter(MachineIRBuilder &B, ArgInfo &OrigArg,
                                        uint64_t Offset,
                                        Align Alignment) const {
  MachineFunction &MF = B.getMF();
  const Function &F = MF.getFunction();
  const DataLayout &DL = F.getParent()->getDataLayout();
  MachinePointerInfo PtrInfo(AMDGPUAS::CONSTANT_ADDRESS);

  LLT PtrTy = LLT::pointer(AMDGPUAS::CONSTANT_ADDRESS, 64);

  SmallVector<ArgInfo, 32> SplitArgs;
  SmallVector<uint64_t> FieldOffsets;
  splitToValueTypes(OrigArg, SplitArgs, DL, F.getCallingConv(), &FieldOffsets);

  unsigned Idx = 0;
  for (ArgInfo &SplitArg : SplitArgs) {
    Register PtrReg = B.getMRI()->createGenericVirtualRegister(PtrTy);
    lowerParameterPtr(PtrReg, B, Offset + FieldOffsets[Idx]);

    LLT ArgTy = getLLTForType(*SplitArg.Ty, DL);
    if (SplitArg.Flags[0].isPointer()) {
      // Compensate for losing pointeriness in splitValueTypes.
      LLT PtrTy = LLT::pointer(SplitArg.Flags[0].getPointerAddrSpace(),
                               ArgTy.getScalarSizeInBits());
      ArgTy = ArgTy.isVector() ? LLT::vector(ArgTy.getElementCount(), PtrTy)
                               : PtrTy;
    }

    MachineMemOperand *MMO = MF.getMachineMemOperand(
        PtrInfo,
        MachineMemOperand::MOLoad | MachineMemOperand::MODereferenceable |
            MachineMemOperand::MOInvariant,
        ArgTy, commonAlignment(Alignment, FieldOffsets[Idx]));

    assert(SplitArg.Regs.size() == 1);

    B.buildLoad(SplitArg.Regs[0], PtrReg, *MMO);
    ++Idx;
  }
}

// Allocate special inputs passed in user SGPRs.
static void allocateHSAUserSGPRs(CCState &CCInfo,
                                 MachineIRBuilder &B,
                                 MachineFunction &MF,
                                 const SIRegisterInfo &TRI,
                                 SIMachineFunctionInfo &Info) {
  // FIXME: How should these inputs interact with inreg / custom SGPR inputs?
  const GCNUserSGPRUsageInfo &UserSGPRInfo = Info.getUserSGPRInfo();
  if (UserSGPRInfo.hasPrivateSegmentBuffer()) {
    Register PrivateSegmentBufferReg = Info.addPrivateSegmentBuffer(TRI);
    MF.addLiveIn(PrivateSegmentBufferReg, &AMDGPU::SGPR_128RegClass);
    CCInfo.AllocateReg(PrivateSegmentBufferReg);
  }

  if (UserSGPRInfo.hasDispatchPtr()) {
    Register DispatchPtrReg = Info.addDispatchPtr(TRI);
    MF.addLiveIn(DispatchPtrReg, &AMDGPU::SGPR_64RegClass);
    CCInfo.AllocateReg(DispatchPtrReg);
  }

  const Module *M = MF.getFunction().getParent();
  if (UserSGPRInfo.hasQueuePtr() &&
      AMDGPU::getCodeObjectVersion(*M) < AMDGPU::AMDHSA_COV5) {
    Register QueuePtrReg = Info.addQueuePtr(TRI);
    MF.addLiveIn(QueuePtrReg, &AMDGPU::SGPR_64RegClass);
    CCInfo.AllocateReg(QueuePtrReg);
  }

  if (UserSGPRInfo.hasKernargSegmentPtr()) {
    MachineRegisterInfo &MRI = MF.getRegInfo();
    Register InputPtrReg = Info.addKernargSegmentPtr(TRI);
    const LLT P4 = LLT::pointer(AMDGPUAS::CONSTANT_ADDRESS, 64);
    Register VReg = MRI.createGenericVirtualRegister(P4);
    MRI.addLiveIn(InputPtrReg, VReg);
    B.getMBB().addLiveIn(InputPtrReg);
    B.buildCopy(VReg, InputPtrReg);
    CCInfo.AllocateReg(InputPtrReg);
  }

  if (UserSGPRInfo.hasDispatchID()) {
    Register DispatchIDReg = Info.addDispatchID(TRI);
    MF.addLiveIn(DispatchIDReg, &AMDGPU::SGPR_64RegClass);
    CCInfo.AllocateReg(DispatchIDReg);
  }

  if (UserSGPRInfo.hasFlatScratchInit()) {
    Register FlatScratchInitReg = Info.addFlatScratchInit(TRI);
    MF.addLiveIn(FlatScratchInitReg, &AMDGPU::SGPR_64RegClass);
    CCInfo.AllocateReg(FlatScratchInitReg);
  }

  // TODO: Add GridWorkGroupCount user SGPRs when used. For now with HSA we read
  // these from the dispatch pointer.
}

bool AMDGPUCallLowering::lowerFormalArgumentsKernel(
    MachineIRBuilder &B, const Function &F,
    ArrayRef<ArrayRef<Register>> VRegs) const {
  MachineFunction &MF = B.getMF();
  const GCNSubtarget *Subtarget = &MF.getSubtarget<GCNSubtarget>();
  MachineRegisterInfo &MRI = MF.getRegInfo();
  SIMachineFunctionInfo *Info = MF.getInfo<SIMachineFunctionInfo>();
  const SIRegisterInfo *TRI = Subtarget->getRegisterInfo();
  const SITargetLowering &TLI = *getTLI<SITargetLowering>();
  const DataLayout &DL = F.getParent()->getDataLayout();

  SmallVector<CCValAssign, 16> ArgLocs;
  CCState CCInfo(F.getCallingConv(), F.isVarArg(), MF, ArgLocs, F.getContext());

  allocateHSAUserSGPRs(CCInfo, B, MF, *TRI, *Info);

  unsigned i = 0;
  const Align KernArgBaseAlign(16);
  const unsigned BaseOffset = Subtarget->getExplicitKernelArgOffset();
  uint64_t ExplicitArgOffset = 0;

  // TODO: Align down to dword alignment and extract bits for extending loads.
  for (auto &Arg : F.args()) {
    const bool IsByRef = Arg.hasByRefAttr();
    Type *ArgTy = IsByRef ? Arg.getParamByRefType() : Arg.getType();
    unsigned AllocSize = DL.getTypeAllocSize(ArgTy);
    if (AllocSize == 0)
      continue;

    MaybeAlign ParamAlign = IsByRef ? Arg.getParamAlign() : std::nullopt;
    Align ABIAlign = DL.getValueOrABITypeAlignment(ParamAlign, ArgTy);

    uint64_t ArgOffset = alignTo(ExplicitArgOffset, ABIAlign) + BaseOffset;
    ExplicitArgOffset = alignTo(ExplicitArgOffset, ABIAlign) + AllocSize;

    if (Arg.use_empty()) {
      ++i;
      continue;
    }

    Align Alignment = commonAlignment(KernArgBaseAlign, ArgOffset);

    if (IsByRef) {
      unsigned ByRefAS = cast<PointerType>(Arg.getType())->getAddressSpace();

      assert(VRegs[i].size() == 1 &&
             "expected only one register for byval pointers");
      if (ByRefAS == AMDGPUAS::CONSTANT_ADDRESS) {
        lowerParameterPtr(VRegs[i][0], B, ArgOffset);
      } else {
        const LLT ConstPtrTy = LLT::pointer(AMDGPUAS::CONSTANT_ADDRESS, 64);
        Register PtrReg = MRI.createGenericVirtualRegister(ConstPtrTy);
        lowerParameterPtr(PtrReg, B, ArgOffset);

        B.buildAddrSpaceCast(VRegs[i][0], PtrReg);
      }
    } else {
      ArgInfo OrigArg(VRegs[i], Arg, i);
      const unsigned OrigArgIdx = i + AttributeList::FirstArgIndex;
      setArgFlags(OrigArg, OrigArgIdx, DL, F);
      lowerParameter(B, OrigArg, ArgOffset, Alignment);
    }

    ++i;
  }

  TLI.allocateSpecialEntryInputVGPRs(CCInfo, MF, *TRI, *Info);
  TLI.allocateSystemSGPRs(CCInfo, MF, *Info, F.getCallingConv(), false);
  return true;
}

bool AMDGPUCallLowering::lowerFormalArguments(
    MachineIRBuilder &B, const Function &F, ArrayRef<ArrayRef<Register>> VRegs,
    FunctionLoweringInfo &FLI) const {
  CallingConv::ID CC = F.getCallingConv();

  // The infrastructure for normal calling convention lowering is essentially
  // useless for kernels. We want to avoid any kind of legalization or argument
  // splitting.
  if (CC == CallingConv::AMDGPU_KERNEL)
    return lowerFormalArgumentsKernel(B, F, VRegs);

  const bool IsGraphics = AMDGPU::isGraphics(CC);
  const bool IsEntryFunc = AMDGPU::isEntryFunctionCC(CC);

  MachineFunction &MF = B.getMF();
  MachineBasicBlock &MBB = B.getMBB();
  MachineRegisterInfo &MRI = MF.getRegInfo();
  SIMachineFunctionInfo *Info = MF.getInfo<SIMachineFunctionInfo>();
  const GCNSubtarget &Subtarget = MF.getSubtarget<GCNSubtarget>();
  const SIRegisterInfo *TRI = Subtarget.getRegisterInfo();
  const DataLayout &DL = F.getParent()->getDataLayout();

  SmallVector<CCValAssign, 16> ArgLocs;
  CCState CCInfo(CC, F.isVarArg(), MF, ArgLocs, F.getContext());
  const GCNUserSGPRUsageInfo &UserSGPRInfo = Info->getUserSGPRInfo();

  if (UserSGPRInfo.hasImplicitBufferPtr()) {
    Register ImplicitBufferPtrReg = Info->addImplicitBufferPtr(*TRI);
    MF.addLiveIn(ImplicitBufferPtrReg, &AMDGPU::SGPR_64RegClass);
    CCInfo.AllocateReg(ImplicitBufferPtrReg);
  }

  // FIXME: This probably isn't defined for mesa
  if (UserSGPRInfo.hasFlatScratchInit() && !Subtarget.isAmdPalOS()) {
    Register FlatScratchInitReg = Info->addFlatScratchInit(*TRI);
    MF.addLiveIn(FlatScratchInitReg, &AMDGPU::SGPR_64RegClass);
    CCInfo.AllocateReg(FlatScratchInitReg);
  }

  SmallVector<ArgInfo, 32> SplitArgs;
  unsigned Idx = 0;
  unsigned PSInputNum = 0;

  // Insert the hidden sret parameter if the return value won't fit in the
  // return registers.
  if (!FLI.CanLowerReturn)
    insertSRetIncomingArgument(F, SplitArgs, FLI.DemoteRegister, MRI, DL);

  for (auto &Arg : F.args()) {
    if (DL.getTypeStoreSize(Arg.getType()) == 0)
      continue;

    const bool InReg = Arg.hasAttribute(Attribute::InReg);

    // SGPR arguments to functions not implemented.
    if (!IsGraphics && InReg)
      return false;

    if (Arg.hasAttribute(Attribute::SwiftSelf) ||
        Arg.hasAttribute(Attribute::SwiftError) ||
        Arg.hasAttribute(Attribute::Nest))
      return false;

    if (CC == CallingConv::AMDGPU_PS && !InReg && PSInputNum <= 15) {
      const bool ArgUsed = !Arg.use_empty();
      bool SkipArg = !ArgUsed && !Info->isPSInputAllocated(PSInputNum);

      if (!SkipArg) {
        Info->markPSInputAllocated(PSInputNum);
        if (ArgUsed)
          Info->markPSInputEnabled(PSInputNum);
      }

      ++PSInputNum;

      if (SkipArg) {
        for (Register R : VRegs[Idx])
          B.buildUndef(R);

        ++Idx;
        continue;
      }
    }

    ArgInfo OrigArg(VRegs[Idx], Arg, Idx);
    const unsigned OrigArgIdx = Idx + AttributeList::FirstArgIndex;
    setArgFlags(OrigArg, OrigArgIdx, DL, F);

    splitToValueTypes(OrigArg, SplitArgs, DL, CC);
    ++Idx;
  }

  // At least one interpolation mode must be enabled or else the GPU will
  // hang.
  //
  // Check PSInputAddr instead of PSInputEnable. The idea is that if the user
  // set PSInputAddr, the user wants to enable some bits after the compilation
  // based on run-time states. Since we can't know what the final PSInputEna
  // will look like, we shouldn't do anything here and the user should take
  // responsibility for the correct programming.
  //
  // Otherwise, the following restrictions apply:
  // - At least one of PERSP_* (0xF) or LINEAR_* (0x70) must be enabled.
  // - If POS_W_FLOAT (11) is enabled, at least one of PERSP_* must be
  //   enabled too.
  if (CC == CallingConv::AMDGPU_PS) {
    if ((Info->getPSInputAddr() & 0x7F) == 0 ||
        ((Info->getPSInputAddr() & 0xF) == 0 &&
         Info->isPSInputAllocated(11))) {
      CCInfo.AllocateReg(AMDGPU::VGPR0);
      CCInfo.AllocateReg(AMDGPU::VGPR1);
      Info->markPSInputAllocated(0);
      Info->markPSInputEnabled(0);
    }

    if (Subtarget.isAmdPalOS()) {
      // For isAmdPalOS, the user does not enable some bits after compilation
      // based on run-time states; the register values being generated here are
      // the final ones set in hardware. Therefore we need to apply the
      // workaround to PSInputAddr and PSInputEnable together. (The case where
      // a bit is set in PSInputAddr but not PSInputEnable is where the frontend
      // set up an input arg for a particular interpolation mode, but nothing
      // uses that input arg. Really we should have an earlier pass that removes
      // such an arg.)
      unsigned PsInputBits = Info->getPSInputAddr() & Info->getPSInputEnable();
      if ((PsInputBits & 0x7F) == 0 ||
          ((PsInputBits & 0xF) == 0 &&
           (PsInputBits >> 11 & 1)))
        Info->markPSInputEnabled(llvm::countr_zero(Info->getPSInputAddr()));
    }
  }

  const SITargetLowering &TLI = *getTLI<SITargetLowering>();
  CCAssignFn *AssignFn = TLI.CCAssignFnForCall(CC, F.isVarArg());

  if (!MBB.empty())
    B.setInstr(*MBB.begin());

  if (!IsEntryFunc && !IsGraphics) {
    // For the fixed ABI, pass workitem IDs in the last argument register.
    TLI.allocateSpecialInputVGPRsFixed(CCInfo, MF, *TRI, *Info);
  }

  IncomingValueAssigner Assigner(AssignFn);
  if (!determineAssignments(Assigner, SplitArgs, CCInfo))
    return false;

  FormalArgHandler Handler(B, MRI);
  if (!handleAssignments(Handler, SplitArgs, CCInfo, ArgLocs, B))
    return false;

  uint64_t StackSize = Assigner.StackSize;

  // Start adding system SGPRs.
  if (IsEntryFunc) {
    TLI.allocateSystemSGPRs(CCInfo, MF, *Info, CC, IsGraphics);
  } else {
    if (!Subtarget.enableFlatScratch())
      CCInfo.AllocateReg(Info->getScratchRSrcReg());
    TLI.allocateSpecialInputSGPRs(CCInfo, MF, *TRI, *Info);
  }

  // When we tail call, we need to check if the callee's arguments will fit on
  // the caller's stack. So, whenever we lower formal arguments, we should keep
  // track of this information, since we might lower a tail call in this
  // function later.
  Info->setBytesInStackArgArea(StackSize);

  // Move back to the end of the basic block.
  B.setMBB(MBB);

  return true;
}

bool AMDGPUCallLowering::passSpecialInputs(MachineIRBuilder &MIRBuilder,
                                           CCState &CCInfo,
                                           SmallVectorImpl<std::pair<MCRegister, Register>> &ArgRegs,
                                           CallLoweringInfo &Info) const {
  MachineFunction &MF = MIRBuilder.getMF();

  // If there's no call site, this doesn't correspond to a call from the IR and
  // doesn't need implicit inputs.
  if (!Info.CB)
    return true;

  const AMDGPUFunctionArgInfo *CalleeArgInfo
    = &AMDGPUArgumentUsageInfo::FixedABIFunctionInfo;

  const SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();
  const AMDGPUFunctionArgInfo &CallerArgInfo = MFI->getArgInfo();

  // TODO: Unify with private memory register handling. This is complicated by
  // the fact that at least in kernels, the input argument is not necessarily
  // in the same location as the input.
  AMDGPUFunctionArgInfo::PreloadedValue InputRegs[] = {
    AMDGPUFunctionArgInfo::DISPATCH_PTR,
    AMDGPUFunctionArgInfo::QUEUE_PTR,
    AMDGPUFunctionArgInfo::IMPLICIT_ARG_PTR,
    AMDGPUFunctionArgInfo::DISPATCH_ID,
    AMDGPUFunctionArgInfo::WORKGROUP_ID_X,
    AMDGPUFunctionArgInfo::WORKGROUP_ID_Y,
    AMDGPUFunctionArgInfo::WORKGROUP_ID_Z,
    AMDGPUFunctionArgInfo::LDS_KERNEL_ID,
  };

  static constexpr StringLiteral ImplicitAttrNames[] = {
    "amdgpu-no-dispatch-ptr",
    "amdgpu-no-queue-ptr",
    "amdgpu-no-implicitarg-ptr",
    "amdgpu-no-dispatch-id",
    "amdgpu-no-workgroup-id-x",
    "amdgpu-no-workgroup-id-y",
    "amdgpu-no-workgroup-id-z",
    "amdgpu-no-lds-kernel-id",
  };

  MachineRegisterInfo &MRI = MF.getRegInfo();

  const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>();
  const AMDGPULegalizerInfo *LI
    = static_cast<const AMDGPULegalizerInfo*>(ST.getLegalizerInfo());

  unsigned I = 0;
  for (auto InputID : InputRegs) {
    const ArgDescriptor *OutgoingArg;
    const TargetRegisterClass *ArgRC;
    LLT ArgTy;

    // If the callee does not use the attribute value, skip copying the value.
    if (Info.CB->hasFnAttr(ImplicitAttrNames[I++]))
      continue;

    std::tie(OutgoingArg, ArgRC, ArgTy) =
        CalleeArgInfo->getPreloadedValue(InputID);
    if (!OutgoingArg)
      continue;

    const ArgDescriptor *IncomingArg;
    const TargetRegisterClass *IncomingArgRC;
    std::tie(IncomingArg, IncomingArgRC, ArgTy) =
        CallerArgInfo.getPreloadedValue(InputID);
    assert(IncomingArgRC == ArgRC);

    Register InputReg = MRI.createGenericVirtualRegister(ArgTy);

    if (IncomingArg) {
      LI->loadInputValue(InputReg, MIRBuilder, IncomingArg, ArgRC, ArgTy);
    } else if (InputID == AMDGPUFunctionArgInfo::IMPLICIT_ARG_PTR) {
      LI->getImplicitArgPtr(InputReg, MRI, MIRBuilder);
    } else if (InputID == AMDGPUFunctionArgInfo::LDS_KERNEL_ID) {
      std::optional<uint32_t> Id =
          AMDGPUMachineFunction::getLDSKernelIdMetadata(MF.getFunction());
      if (Id) {
        MIRBuilder.buildConstant(InputReg, *Id);
      } else {
        MIRBuilder.buildUndef(InputReg);
      }
    } else {
      // We may have proven the input wasn't needed, although the ABI still
      // requires it. We just need to allocate the register appropriately.
      MIRBuilder.buildUndef(InputReg);
    }

    if (OutgoingArg->isRegister()) {
      ArgRegs.emplace_back(OutgoingArg->getRegister(), InputReg);
      if (!CCInfo.AllocateReg(OutgoingArg->getRegister()))
        report_fatal_error("failed to allocate implicit input argument");
    } else {
      LLVM_DEBUG(dbgs() << "Unhandled stack passed implicit input argument\n");
      return false;
    }
  }

  // Pack workitem IDs into a single register or pass them as-is if already
  // packed.
  const ArgDescriptor *OutgoingArg;
  const TargetRegisterClass *ArgRC;
  LLT ArgTy;

  std::tie(OutgoingArg, ArgRC, ArgTy) =
      CalleeArgInfo->getPreloadedValue(AMDGPUFunctionArgInfo::WORKITEM_ID_X);
  if (!OutgoingArg)
    std::tie(OutgoingArg, ArgRC, ArgTy) =
        CalleeArgInfo->getPreloadedValue(AMDGPUFunctionArgInfo::WORKITEM_ID_Y);
  if (!OutgoingArg)
    std::tie(OutgoingArg, ArgRC, ArgTy) =
        CalleeArgInfo->getPreloadedValue(AMDGPUFunctionArgInfo::WORKITEM_ID_Z);
  if (!OutgoingArg)
    return false;

  auto WorkitemIDX =
      CallerArgInfo.getPreloadedValue(AMDGPUFunctionArgInfo::WORKITEM_ID_X);
  auto WorkitemIDY =
      CallerArgInfo.getPreloadedValue(AMDGPUFunctionArgInfo::WORKITEM_ID_Y);
  auto WorkitemIDZ =
      CallerArgInfo.getPreloadedValue(AMDGPUFunctionArgInfo::WORKITEM_ID_Z);

  const ArgDescriptor *IncomingArgX = std::get<0>(WorkitemIDX);
  const ArgDescriptor *IncomingArgY = std::get<0>(WorkitemIDY);
  const ArgDescriptor *IncomingArgZ = std::get<0>(WorkitemIDZ);
  const LLT S32 = LLT::scalar(32);

  const bool NeedWorkItemIDX = !Info.CB->hasFnAttr("amdgpu-no-workitem-id-x");
  const bool NeedWorkItemIDY = !Info.CB->hasFnAttr("amdgpu-no-workitem-id-y");
  const bool NeedWorkItemIDZ = !Info.CB->hasFnAttr("amdgpu-no-workitem-id-z");

  // If incoming ids are not packed we need to pack them.
  // FIXME: Should consider known workgroup size to eliminate known 0 cases.
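  // Packed layout (as built below): X in bits [0:9], Y in bits [10:19]
  // (shifted left by 10), and Z in bits [20:29] (shifted left by 20) of one
  // 32-bit register.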
  Register InputReg;
  if (IncomingArgX && !IncomingArgX->isMasked() && CalleeArgInfo->WorkItemIDX &&
      NeedWorkItemIDX) {
    if (ST.getMaxWorkitemID(MF.getFunction(), 0) != 0) {
      InputReg = MRI.createGenericVirtualRegister(S32);
      LI->loadInputValue(InputReg, MIRBuilder, IncomingArgX,
                         std::get<1>(WorkitemIDX), std::get<2>(WorkitemIDX));
    } else {
      InputReg = MIRBuilder.buildConstant(S32, 0).getReg(0);
    }
  }

  if (IncomingArgY && !IncomingArgY->isMasked() && CalleeArgInfo->WorkItemIDY &&
      NeedWorkItemIDY && ST.getMaxWorkitemID(MF.getFunction(), 1) != 0) {
    Register Y = MRI.createGenericVirtualRegister(S32);
    LI->loadInputValue(Y, MIRBuilder, IncomingArgY, std::get<1>(WorkitemIDY),
                       std::get<2>(WorkitemIDY));

    Y = MIRBuilder.buildShl(S32, Y, MIRBuilder.buildConstant(S32, 10)).getReg(0);
    InputReg = InputReg ? MIRBuilder.buildOr(S32, InputReg, Y).getReg(0) : Y;
  }

  if (IncomingArgZ && !IncomingArgZ->isMasked() && CalleeArgInfo->WorkItemIDZ &&
      NeedWorkItemIDZ && ST.getMaxWorkitemID(MF.getFunction(), 2) != 0) {
    Register Z = MRI.createGenericVirtualRegister(S32);
    LI->loadInputValue(Z, MIRBuilder, IncomingArgZ, std::get<1>(WorkitemIDZ),
                       std::get<2>(WorkitemIDZ));

    Z = MIRBuilder.buildShl(S32, Z, MIRBuilder.buildConstant(S32, 20)).getReg(0);
    InputReg = InputReg ? MIRBuilder.buildOr(S32, InputReg, Z).getReg(0) : Z;
  }

  if (!InputReg &&
      (NeedWorkItemIDX || NeedWorkItemIDY || NeedWorkItemIDZ)) {
    InputReg = MRI.createGenericVirtualRegister(S32);
    if (!IncomingArgX && !IncomingArgY && !IncomingArgZ) {
      // We're in a situation where the outgoing function requires the workitem
      // ID, but the calling function does not have it (e.g. a graphics function
      // calling a C calling convention function). This is illegal, but we need
      // to produce something.
      MIRBuilder.buildUndef(InputReg);
    } else {
      // The workitem IDs are already packed; any of the present incoming
      // arguments will carry all required fields.
      ArgDescriptor IncomingArg = ArgDescriptor::createArg(
        IncomingArgX ? *IncomingArgX :
        IncomingArgY ? *IncomingArgY : *IncomingArgZ, ~0u);
      LI->loadInputValue(InputReg, MIRBuilder, &IncomingArg,
                         &AMDGPU::VGPR_32RegClass, S32);
    }
  }

  if (OutgoingArg->isRegister()) {
    if (InputReg)
      ArgRegs.emplace_back(OutgoingArg->getRegister(), InputReg);

    if (!CCInfo.AllocateReg(OutgoingArg->getRegister()))
      report_fatal_error("failed to allocate implicit input argument");
  } else {
    LLVM_DEBUG(dbgs() << "Unhandled stack passed implicit input argument\n");
    return false;
  }

  return true;
}

/// Returns a pair containing the fixed CCAssignFn and the vararg CCAssignFn for
/// CC.
static std::pair<CCAssignFn *, CCAssignFn *>
getAssignFnsForCC(CallingConv::ID CC, const SITargetLowering &TLI) {
  return {TLI.CCAssignFnForCall(CC, false), TLI.CCAssignFnForCall(CC, true)};
}

static unsigned getCallOpcode(const MachineFunction &CallerF, bool IsIndirect,
                              bool IsTailCall, bool isWave32,
                              CallingConv::ID CC) {
  // For calls to amdgpu_cs_chain functions, the address is known to be uniform.
  assert((AMDGPU::isChainCC(CC) || !IsIndirect || !IsTailCall) &&
         "Indirect calls can't be tail calls, "
         "because the address can be divergent");
  if (!IsTailCall)
    return AMDGPU::G_SI_CALL;

  if (AMDGPU::isChainCC(CC))
    return isWave32 ? AMDGPU::SI_CS_CHAIN_TC_W32 : AMDGPU::SI_CS_CHAIN_TC_W64;

  return CC == CallingConv::AMDGPU_Gfx ? AMDGPU::SI_TCRETURN_GFX :
                                         AMDGPU::SI_TCRETURN;
}

// Add operands to call instruction to track the callee.
static bool addCallTargetOperands(MachineInstrBuilder &CallInst,
                                  MachineIRBuilder &MIRBuilder,
                                  AMDGPUCallLowering::CallLoweringInfo &Info) {
  if (Info.Callee.isReg()) {
    CallInst.addReg(Info.Callee.getReg());
    CallInst.addImm(0);
  } else if (Info.Callee.isGlobal() && Info.Callee.getOffset() == 0) {
    // The call lowering lightly assumed we can directly encode a call target in
    // the instruction, which is not the case. Materialize the address here.
    const GlobalValue *GV = Info.Callee.getGlobal();
    auto Ptr = MIRBuilder.buildGlobalValue(
        LLT::pointer(GV->getAddressSpace(), 64), GV);
    CallInst.addReg(Ptr.getReg(0));
    CallInst.add(Info.Callee);
  } else
    return false;

  return true;
}

bool AMDGPUCallLowering::doCallerAndCalleePassArgsTheSameWay(
    CallLoweringInfo &Info, MachineFunction &MF,
    SmallVectorImpl<ArgInfo> &InArgs) const {
  const Function &CallerF = MF.getFunction();
  CallingConv::ID CalleeCC = Info.CallConv;
  CallingConv::ID CallerCC = CallerF.getCallingConv();

  // If the calling conventions match, then everything must be the same.
  if (CalleeCC == CallerCC)
    return true;

  const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>();

  // Make sure that the caller and callee preserve all of the same registers.
  auto TRI = ST.getRegisterInfo();

  const uint32_t *CallerPreserved = TRI->getCallPreservedMask(MF, CallerCC);
  const uint32_t *CalleePreserved = TRI->getCallPreservedMask(MF, CalleeCC);
  if (!TRI->regmaskSubsetEqual(CallerPreserved, CalleePreserved))
    return false;

  // Check if the caller and callee will handle arguments in the same way.
  const SITargetLowering &TLI = *getTLI<SITargetLowering>();
  CCAssignFn *CalleeAssignFnFixed;
  CCAssignFn *CalleeAssignFnVarArg;
  std::tie(CalleeAssignFnFixed, CalleeAssignFnVarArg) =
      getAssignFnsForCC(CalleeCC, TLI);

  CCAssignFn *CallerAssignFnFixed;
  CCAssignFn *CallerAssignFnVarArg;
  std::tie(CallerAssignFnFixed, CallerAssignFnVarArg) =
      getAssignFnsForCC(CallerCC, TLI);

  // FIXME: We are not accounting for potential differences in implicitly passed
  // inputs, but only the fixed ABI is supported now anyway.
  IncomingValueAssigner CalleeAssigner(CalleeAssignFnFixed,
                                       CalleeAssignFnVarArg);
  IncomingValueAssigner CallerAssigner(CallerAssignFnFixed,
                                       CallerAssignFnVarArg);
  return resultsCompatible(Info, MF, InArgs, CalleeAssigner, CallerAssigner);
}

bool AMDGPUCallLowering::areCalleeOutgoingArgsTailCallable(
    CallLoweringInfo &Info, MachineFunction &MF,
    SmallVectorImpl<ArgInfo> &OutArgs) const {
  // If there are no outgoing arguments, then we are done.
  if (OutArgs.empty())
    return true;

  const Function &CallerF = MF.getFunction();
  CallingConv::ID CalleeCC = Info.CallConv;
  CallingConv::ID CallerCC = CallerF.getCallingConv();
  const SITargetLowering &TLI = *getTLI<SITargetLowering>();

  CCAssignFn *AssignFnFixed;
  CCAssignFn *AssignFnVarArg;
  std::tie(AssignFnFixed, AssignFnVarArg) = getAssignFnsForCC(CalleeCC, TLI);

  // We have outgoing arguments. Make sure that we can tail call with them.
  SmallVector<CCValAssign, 16> OutLocs;
  CCState OutInfo(CalleeCC, false, MF, OutLocs, CallerF.getContext());
  OutgoingValueAssigner Assigner(AssignFnFixed, AssignFnVarArg);

  if (!determineAssignments(Assigner, OutArgs, OutInfo)) {
    LLVM_DEBUG(dbgs() << "... Could not analyze call operands.\n");
    return false;
  }

  // Make sure that they can fit on the caller's stack.
  const SIMachineFunctionInfo *FuncInfo = MF.getInfo<SIMachineFunctionInfo>();
  if (OutInfo.getStackSize() > FuncInfo->getBytesInStackArgArea()) {
    LLVM_DEBUG(dbgs() << "... Cannot fit call operands on caller's stack.\n");
    return false;
  }

  // Verify that the parameters in callee-saved registers match.
  const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>();
  const SIRegisterInfo *TRI = ST.getRegisterInfo();
  const uint32_t *CallerPreservedMask = TRI->getCallPreservedMask(MF, CallerCC);
  MachineRegisterInfo &MRI = MF.getRegInfo();
  return parametersInCSRMatch(MRI, CallerPreservedMask, OutLocs, OutArgs);
}

/// Return true if the calling convention is one that we can guarantee TCO for.
static bool canGuaranteeTCO(CallingConv::ID CC) {
  return CC == CallingConv::Fast;
}

/// Return true if we might ever do TCO for calls with this calling convention.
static bool mayTailCallThisCC(CallingConv::ID CC) {
  switch (CC) {
  case CallingConv::C:
  case CallingConv::AMDGPU_Gfx:
    return true;
  default:
    return canGuaranteeTCO(CC);
  }
}

bool AMDGPUCallLowering::isEligibleForTailCallOptimization(
    MachineIRBuilder &B, CallLoweringInfo &Info,
    SmallVectorImpl<ArgInfo> &InArgs, SmallVectorImpl<ArgInfo> &OutArgs) const {
  // Must pass all target-independent checks in order to tail call optimize.
  if (!Info.IsTailCall)
    return false;

  // Indirect calls can't be tail calls, because the address can be divergent.
  // TODO Check divergence info if the call really is divergent.
  if (Info.Callee.isReg())
    return false;

  MachineFunction &MF = B.getMF();
  const Function &CallerF = MF.getFunction();
  CallingConv::ID CalleeCC = Info.CallConv;
  CallingConv::ID CallerCC = CallerF.getCallingConv();

  const SIRegisterInfo *TRI = MF.getSubtarget<GCNSubtarget>().getRegisterInfo();
  const uint32_t *CallerPreserved = TRI->getCallPreservedMask(MF, CallerCC);
  // Kernels aren't callable, and don't have a live in return address so it
  // doesn't make sense to do a tail call with entry functions.
  if (!CallerPreserved)
    return false;

  if (!mayTailCallThisCC(CalleeCC)) {
    LLVM_DEBUG(dbgs() << "... Calling convention cannot be tail called.\n");
    return false;
  }

  if (any_of(CallerF.args(), [](const Argument &A) {
        return A.hasByValAttr() || A.hasSwiftErrorAttr();
      })) {
    LLVM_DEBUG(dbgs() << "... Cannot tail call from callers with byval "
                         "or swifterror arguments\n");
    return false;
  }

  // If we have -tailcallopt, then we're done.
  if (MF.getTarget().Options.GuaranteedTailCallOpt)
    return canGuaranteeTCO(CalleeCC) && CalleeCC == CallerF.getCallingConv();

  // Verify that the incoming and outgoing arguments from the callee are
  // safe to tail call.
  if (!doCallerAndCalleePassArgsTheSameWay(Info, MF, InArgs)) {
    LLVM_DEBUG(
        dbgs()
        << "... Caller and callee have incompatible calling conventions.\n");
    return false;
  }

  if (!areCalleeOutgoingArgsTailCallable(Info, MF, OutArgs))
    return false;

  LLVM_DEBUG(dbgs() << "... Call is eligible for tail call optimization.\n");
  return true;
}

// Insert outgoing implicit arguments for a call, by inserting copies to the
// implicit argument registers and adding the necessary implicit uses to the
// call instruction.
void AMDGPUCallLowering::handleImplicitCallArguments(
    MachineIRBuilder &MIRBuilder, MachineInstrBuilder &CallInst,
    const GCNSubtarget &ST, const SIMachineFunctionInfo &FuncInfo,
    CallingConv::ID CalleeCC,
    ArrayRef<std::pair<MCRegister, Register>> ImplicitArgRegs) const {
  if (!ST.enableFlatScratch()) {
    // Insert copies for the SRD. In the HSA case, this should be an identity
    // copy.
    auto ScratchRSrcReg = MIRBuilder.buildCopy(LLT::fixed_vector(4, 32),
                                               FuncInfo.getScratchRSrcReg());

    auto CalleeRSrcReg = AMDGPU::isChainCC(CalleeCC)
                             ? AMDGPU::SGPR48_SGPR49_SGPR50_SGPR51
                             : AMDGPU::SGPR0_SGPR1_SGPR2_SGPR3;

    MIRBuilder.buildCopy(CalleeRSrcReg, ScratchRSrcReg);
    CallInst.addReg(CalleeRSrcReg, RegState::Implicit);
  }

  for (std::pair<MCRegister, Register> ArgReg : ImplicitArgRegs) {
    MIRBuilder.buildCopy((Register)ArgReg.first, ArgReg.second);
    CallInst.addReg(ArgReg.first, RegState::Implicit);
  }
}

bool AMDGPUCallLowering::lowerTailCall(
    MachineIRBuilder &MIRBuilder, CallLoweringInfo &Info,
    SmallVectorImpl<ArgInfo> &OutArgs) const {
  MachineFunction &MF = MIRBuilder.getMF();
  const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>();
  SIMachineFunctionInfo *FuncInfo = MF.getInfo<SIMachineFunctionInfo>();
  const Function &F = MF.getFunction();
  MachineRegisterInfo &MRI = MF.getRegInfo();
  const SITargetLowering &TLI = *getTLI<SITargetLowering>();

  // True when we're tail calling, but without -tailcallopt.
  bool IsSibCall = !MF.getTarget().Options.GuaranteedTailCallOpt;

  // Find out which ABI gets to decide where things go.
  CallingConv::ID CalleeCC = Info.CallConv;
  CCAssignFn *AssignFnFixed;
  CCAssignFn *AssignFnVarArg;
  std::tie(AssignFnFixed, AssignFnVarArg) = getAssignFnsForCC(CalleeCC, TLI);

  MachineInstrBuilder CallSeqStart;
  if (!IsSibCall)
    CallSeqStart = MIRBuilder.buildInstr(AMDGPU::ADJCALLSTACKUP);

  unsigned Opc =
      getCallOpcode(MF, Info.Callee.isReg(), true, ST.isWave32(), CalleeCC);
  auto MIB = MIRBuilder.buildInstrNoInsert(Opc);
  if (!addCallTargetOperands(MIB, MIRBuilder, Info))
    return false;

  // Byte offset for the tail call. When we are sibcalling, this will always
  // be 0.
  MIB.addImm(0);

  // If this is a chain call, we need to pass in the EXEC mask.
  const SIRegisterInfo *TRI = ST.getRegisterInfo();
  if (AMDGPU::isChainCC(Info.CallConv)) {
    ArgInfo ExecArg = Info.OrigArgs[1];
    assert(ExecArg.Regs.size() == 1 && "Too many regs for EXEC");

    if (!ExecArg.Ty->isIntegerTy(ST.getWavefrontSize()))
      return false;

    if (auto CI = dyn_cast<ConstantInt>(ExecArg.OrigValue)) {
      MIB.addImm(CI->getSExtValue());
    } else {
      MIB.addReg(ExecArg.Regs[0]);
      unsigned Idx = MIB->getNumOperands() - 1;
      MIB->getOperand(Idx).setReg(constrainOperandRegClass(
          MF, *TRI, MRI, *ST.getInstrInfo(), *ST.getRegBankInfo(), *MIB,
          MIB->getDesc(), MIB->getOperand(Idx), Idx));
    }
  }

  // Tell the call which registers are clobbered.
  const uint32_t *Mask = TRI->getCallPreservedMask(MF, CalleeCC);
  MIB.addRegMask(Mask);

  // FPDiff is the byte offset of the call's argument area from the callee's.
  // Stores to callee stack arguments will be placed in FixedStackSlots offset
  // by this amount for a tail call. In a sibling call it must be 0 because the
  // caller will deallocate the entire stack and the callee still expects its
  // arguments to begin at SP+0.
  int FPDiff = 0;

  // This will be 0 for sibcalls, potentially nonzero for tail calls produced
  // by -tailcallopt. For sibcalls, the memory operands for the call are
  // already available in the caller's incoming argument space.
  unsigned NumBytes = 0;
  if (!IsSibCall) {
    // We aren't sibcalling, so we need to compute FPDiff. We need to do this
    // before handling assignments, because FPDiff must be known for memory
    // arguments.
    unsigned NumReusableBytes = FuncInfo->getBytesInStackArgArea();
    SmallVector<CCValAssign, 16> OutLocs;
    CCState OutInfo(CalleeCC, false, MF, OutLocs, F.getContext());

    // FIXME: Not accounting for callee implicit inputs
    OutgoingValueAssigner CalleeAssigner(AssignFnFixed, AssignFnVarArg);
    if (!determineAssignments(CalleeAssigner, OutArgs, OutInfo))
      return false;

    // The callee will pop the argument stack as a tail call. Thus, we must
    // keep it 16-byte aligned.
    NumBytes = alignTo(OutInfo.getStackSize(), ST.getStackAlignment());

    // FPDiff will be negative if this tail call requires more space than we
    // would automatically have in our incoming argument space. Positive if we
    // actually shrink the stack.
    FPDiff = NumReusableBytes - NumBytes;

    // The stack pointer must be 16-byte aligned at all times it's used for a
    // memory operation, which in practice means at *all* times and in
    // particular across call boundaries. Therefore our own arguments started at
    // a 16-byte aligned SP and the delta applied for the tail call should
    // satisfy the same constraint.
    assert(isAligned(ST.getStackAlignment(), FPDiff) &&
           "unaligned stack on tail call");
  }

  SmallVector<CCValAssign, 16> ArgLocs;
  CCState CCInfo(Info.CallConv, Info.IsVarArg, MF, ArgLocs, F.getContext());

  // We could pass MIB and directly add the implicit uses to the call
  // now. However, as an aesthetic choice, place implicit argument operands
  // after the ordinary user argument registers.
  SmallVector<std::pair<MCRegister, Register>, 12> ImplicitArgRegs;

  if (Info.CallConv != CallingConv::AMDGPU_Gfx &&
      !AMDGPU::isChainCC(Info.CallConv)) {
    // With a fixed ABI, allocate fixed registers before user arguments.
    if (!passSpecialInputs(MIRBuilder, CCInfo, ImplicitArgRegs, Info))
      return false;
  }

  OutgoingValueAssigner Assigner(AssignFnFixed, AssignFnVarArg);

  if (!determineAssignments(Assigner, OutArgs, CCInfo))
    return false;

  // Do the actual argument marshalling.
  AMDGPUOutgoingArgHandler Handler(MIRBuilder, MRI, MIB, true, FPDiff);
  if (!handleAssignments(Handler, OutArgs, CCInfo, ArgLocs, MIRBuilder))
    return false;

  handleImplicitCallArguments(MIRBuilder, MIB, ST, *FuncInfo, CalleeCC,
                              ImplicitArgRegs);

  // If we have -tailcallopt, we need to adjust the stack. We'll do the call
  // sequence start and end here.
  if (!IsSibCall) {
    MIB->getOperand(1).setImm(FPDiff);
    CallSeqStart.addImm(NumBytes).addImm(0);
    // End the call sequence *before* emitting the call. Normally, we would
    // tidy the frame up after the call. However, here, we've laid out the
    // parameters so that when SP is reset, they will be in the correct
    // location.
    MIRBuilder.buildInstr(AMDGPU::ADJCALLSTACKDOWN).addImm(NumBytes).addImm(0);
  }

  // Now we can add the actual call instruction to the correct basic block.
  MIRBuilder.insertInstr(MIB);

  // If Callee is a reg, since it is used by a target specific
  // instruction, it must have a register class matching the
  // constraint of that instruction.

  // FIXME: We should define regbankselectable call instructions to handle
  // divergent call targets.
  if (MIB->getOperand(0).isReg()) {
    MIB->getOperand(0).setReg(constrainOperandRegClass(
        MF, *TRI, MRI, *ST.getInstrInfo(), *ST.getRegBankInfo(), *MIB,
        MIB->getDesc(), MIB->getOperand(0), 0));
  }

  MF.getFrameInfo().setHasTailCall();
  Info.LoweredTailCall = true;
  return true;
}

/// Lower a call to the @llvm.amdgcn.cs.chain intrinsic.
bool AMDGPUCallLowering::lowerChainCall(MachineIRBuilder &MIRBuilder,
                                        CallLoweringInfo &Info) const {
  ArgInfo Callee = Info.OrigArgs[0];
  ArgInfo SGPRArgs = Info.OrigArgs[2];
  ArgInfo VGPRArgs = Info.OrigArgs[3];
  ArgInfo Flags = Info.OrigArgs[4];

  assert(cast<ConstantInt>(Flags.OrigValue)->isZero() &&
         "Non-zero flags aren't supported yet.");
  assert(Info.OrigArgs.size() == 5 && "Additional args aren't supported yet.");

  MachineFunction &MF = MIRBuilder.getMF();
  const Function &F = MF.getFunction();
  const DataLayout &DL = F.getParent()->getDataLayout();

  // The function to jump to is actually the first argument, so we'll change the
  // Callee and other info to match that before using our existing helper.
  const Value *CalleeV = Callee.OrigValue->stripPointerCasts();
  if (const Function *F = dyn_cast<Function>(CalleeV)) {
    Info.Callee = MachineOperand::CreateGA(F, 0);
    Info.CallConv = F->getCallingConv();
  } else {
    assert(Callee.Regs.size() == 1 && "Too many regs for the callee");
    Info.Callee = MachineOperand::CreateReg(Callee.Regs[0], false);
    Info.CallConv = CallingConv::AMDGPU_CS_Chain; // amdgpu_cs_chain_preserve
                                                  // behaves the same here.
  }

  // The function that we're calling cannot be vararg (only the intrinsic is).
  Info.IsVarArg = false;

  assert(std::all_of(SGPRArgs.Flags.begin(), SGPRArgs.Flags.end(),
                     [](ISD::ArgFlagsTy F) { return F.isInReg(); }) &&
         "SGPR arguments should be marked inreg");
  assert(std::none_of(VGPRArgs.Flags.begin(), VGPRArgs.Flags.end(),
                      [](ISD::ArgFlagsTy F) { return F.isInReg(); }) &&
         "VGPR arguments should not be marked inreg");

  SmallVector<ArgInfo, 8> OutArgs;
  splitToValueTypes(SGPRArgs, OutArgs, DL, Info.CallConv);
  splitToValueTypes(VGPRArgs, OutArgs, DL, Info.CallConv);

  Info.IsMustTailCall = true;
  return lowerTailCall(MIRBuilder, Info, OutArgs);
}

bool AMDGPUCallLowering::lowerCall(MachineIRBuilder &MIRBuilder,
                                   CallLoweringInfo &Info) const {
  if (Function *F = Info.CB->getCalledFunction())
    if (F->isIntrinsic()) {
      assert(F->getIntrinsicID() == Intrinsic::amdgcn_cs_chain &&
             "Unexpected intrinsic");
      return lowerChainCall(MIRBuilder, Info);
    }

  if (Info.IsVarArg) {
    LLVM_DEBUG(dbgs() << "Variadic functions not implemented\n");
    return false;
  }

  MachineFunction &MF = MIRBuilder.getMF();
  const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>();
  const SIRegisterInfo *TRI = ST.getRegisterInfo();

  const Function &F = MF.getFunction();
  MachineRegisterInfo &MRI = MF.getRegInfo();
  const SITargetLowering &TLI = *getTLI<SITargetLowering>();
  const DataLayout &DL = F.getParent()->getDataLayout();

  SmallVector<ArgInfo, 8> OutArgs;
  for (auto &OrigArg : Info.OrigArgs)
    splitToValueTypes(OrigArg, OutArgs, DL, Info.CallConv);

  SmallVector<ArgInfo, 8> InArgs;
  if (Info.CanLowerReturn && !Info.OrigRet.Ty->isVoidTy())
    splitToValueTypes(Info.OrigRet, InArgs, DL, Info.CallConv);

  // If we can lower as a tail call, do that instead.
  bool CanTailCallOpt =
      isEligibleForTailCallOptimization(MIRBuilder, Info, InArgs, OutArgs);

  // We must emit a tail call if we have musttail.
  if (Info.IsMustTailCall && !CanTailCallOpt) {
    LLVM_DEBUG(dbgs() << "Failed to lower musttail call as tail call\n");
    return false;
  }

  Info.IsTailCall = CanTailCallOpt;
  if (CanTailCallOpt)
    return lowerTailCall(MIRBuilder, Info, OutArgs);

  // Find out which ABI gets to decide where things go.
  CCAssignFn *AssignFnFixed;
  CCAssignFn *AssignFnVarArg;
  std::tie(AssignFnFixed, AssignFnVarArg) =
      getAssignFnsForCC(Info.CallConv, TLI);

  MIRBuilder.buildInstr(AMDGPU::ADJCALLSTACKUP)
    .addImm(0)
    .addImm(0);

  // Create a temporarily-floating call instruction so we can add the implicit
  // uses of arg registers.
  unsigned Opc = getCallOpcode(MF, Info.Callee.isReg(), false, ST.isWave32(),
                               Info.CallConv);

  auto MIB = MIRBuilder.buildInstrNoInsert(Opc);
  MIB.addDef(TRI->getReturnAddressReg(MF));

  if (!Info.IsConvergent)
    MIB.setMIFlag(MachineInstr::NoConvergent);

  if (!addCallTargetOperands(MIB, MIRBuilder, Info))
    return false;

  // Tell the call which registers are clobbered.
  const uint32_t *Mask = TRI->getCallPreservedMask(MF, Info.CallConv);
  MIB.addRegMask(Mask);

  SmallVector<CCValAssign, 16> ArgLocs;
  CCState CCInfo(Info.CallConv, Info.IsVarArg, MF, ArgLocs, F.getContext());

  // We could pass MIB and directly add the implicit uses to the call
  // now. However, as an aesthetic choice, place implicit argument operands
  // after the ordinary user argument registers.
  SmallVector<std::pair<MCRegister, Register>, 12> ImplicitArgRegs;

  if (Info.CallConv != CallingConv::AMDGPU_Gfx) {
    // With a fixed ABI, allocate fixed registers before user arguments.
    if (!passSpecialInputs(MIRBuilder, CCInfo, ImplicitArgRegs, Info))
      return false;
  }

  // Do the actual argument marshalling.
  SmallVector<Register, 8> PhysRegs;

  OutgoingValueAssigner Assigner(AssignFnFixed, AssignFnVarArg);
  if (!determineAssignments(Assigner, OutArgs, CCInfo))
    return false;

  AMDGPUOutgoingArgHandler Handler(MIRBuilder, MRI, MIB, false);
  if (!handleAssignments(Handler, OutArgs, CCInfo, ArgLocs, MIRBuilder))
    return false;

  const SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();

  handleImplicitCallArguments(MIRBuilder, MIB, ST, *MFI, Info.CallConv,
                              ImplicitArgRegs);

  // Get a count of how many bytes are to be pushed on the stack.
  unsigned NumBytes = CCInfo.getStackSize();

  // If Callee is a reg, since it is used by a target specific
  // instruction, it must have a register class matching the
  // constraint of that instruction.

  // FIXME: We should define regbankselectable call instructions to handle
  // divergent call targets.
  if (MIB->getOperand(1).isReg()) {
    MIB->getOperand(1).setReg(constrainOperandRegClass(
        MF, *TRI, MRI, *ST.getInstrInfo(),
        *ST.getRegBankInfo(), *MIB, MIB->getDesc(), MIB->getOperand(1),
        1));
  }

  // Now we can add the actual call instruction to the correct position.
  MIRBuilder.insertInstr(MIB);

  // Finally we can copy the returned value back into its virtual-register. In
  // symmetry with the arguments, the physical register must be an
  // implicit-define of the call instruction.
  if (Info.CanLowerReturn && !Info.OrigRet.Ty->isVoidTy()) {
    CCAssignFn *RetAssignFn = TLI.CCAssignFnForReturn(Info.CallConv,
                                                      Info.IsVarArg);
    IncomingValueAssigner Assigner(RetAssignFn);
    CallReturnHandler Handler(MIRBuilder, MRI, MIB);
    if (!determineAndHandleAssignments(Handler, Assigner, InArgs, MIRBuilder,
                                       Info.CallConv, Info.IsVarArg))
      return false;
  }

  uint64_t CalleePopBytes = NumBytes;

  MIRBuilder.buildInstr(AMDGPU::ADJCALLSTACKDOWN)
    .addImm(0)
    .addImm(CalleePopBytes);

  if (!Info.CanLowerReturn) {
    insertSRetLoads(MIRBuilder, Info.OrigRet.Ty, Info.OrigRet.Regs,
                    Info.DemoteRegister, Info.DemoteStackIndex);
  }

  return true;
}