//===-- RISCVRegisterInfo.cpp - RISC-V Register Information -----*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file contains the RISC-V implementation of the TargetRegisterInfo class.
//
//===----------------------------------------------------------------------===//

#include "RISCVRegisterInfo.h"
#include "RISCV.h"
#include "RISCVMachineFunctionInfo.h"
#include "RISCVSubtarget.h"
#include "llvm/BinaryFormat/Dwarf.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/RegisterScavenging.h"
#include "llvm/CodeGen/TargetFrameLowering.h"
#include "llvm/CodeGen/TargetInstrInfo.h"
#include "llvm/IR/DebugInfoMetadata.h"
#include "llvm/Support/ErrorHandling.h"

#define GET_REGINFO_TARGET_DESC
#include "RISCVGenRegisterInfo.inc"

using namespace llvm;

static cl::opt<bool>
    DisableRegAllocHints("riscv-disable-regalloc-hints", cl::Hidden,
                         cl::init(false),
                         cl::desc("Disable two address hints for register "
                                  "allocation"));

static_assert(RISCV::X1 == RISCV::X0 + 1, "Register list not consecutive");
static_assert(RISCV::X31 == RISCV::X0 + 31, "Register list not consecutive");
static_assert(RISCV::F1_H == RISCV::F0_H + 1, "Register list not consecutive");
static_assert(RISCV::F31_H == RISCV::F0_H + 31,
              "Register list not consecutive");
static_assert(RISCV::F1_F == RISCV::F0_F + 1, "Register list not consecutive");
static_assert(RISCV::F31_F == RISCV::F0_F + 31,
              "Register list not consecutive");
static_assert(RISCV::F1_D == RISCV::F0_D + 1, "Register list not consecutive");
static_assert(RISCV::F31_D == RISCV::F0_D + 31,
              "Register list not consecutive");
static_assert(RISCV::V1 == RISCV::V0 + 1, "Register list not consecutive");
static_assert(RISCV::V31 == RISCV::V0 + 31, "Register list not consecutive");

RISCVRegisterInfo::RISCVRegisterInfo(unsigned HwMode)
    : RISCVGenRegisterInfo(RISCV::X1, /*DwarfFlavour*/0, /*EHFlavor*/0,
                           /*PC*/0, HwMode) {}

const MCPhysReg *
RISCVRegisterInfo::getCalleeSavedRegs(const MachineFunction *MF) const {
  auto &Subtarget = MF->getSubtarget<RISCVSubtarget>();
  if (MF->getFunction().getCallingConv() == CallingConv::GHC)
    return CSR_NoRegs_SaveList;
  if (MF->getFunction().hasFnAttribute("interrupt")) {
    if (Subtarget.hasStdExtD())
      return CSR_XLEN_F64_Interrupt_SaveList;
    if (Subtarget.hasStdExtF())
      return CSR_XLEN_F32_Interrupt_SaveList;
    return CSR_Interrupt_SaveList;
  }

  switch (Subtarget.getTargetABI()) {
  default:
    llvm_unreachable("Unrecognized ABI");
  case RISCVABI::ABI_ILP32:
  case RISCVABI::ABI_LP64:
    return CSR_ILP32_LP64_SaveList;
  case RISCVABI::ABI_ILP32F:
  case RISCVABI::ABI_LP64F:
    return CSR_ILP32F_LP64F_SaveList;
  case RISCVABI::ABI_ILP32D:
  case RISCVABI::ABI_LP64D:
    return CSR_ILP32D_LP64D_SaveList;
  }
}

BitVector RISCVRegisterInfo::getReservedRegs(const MachineFunction &MF) const {
  const RISCVFrameLowering *TFI = getFrameLowering(MF);
  BitVector Reserved(getNumRegs());

  // Mark any registers requested to be reserved as such.
  for (size_t Reg = 0; Reg < getNumRegs(); Reg++) {
    if (MF.getSubtarget<RISCVSubtarget>().isRegisterReservedByUser(Reg))
      markSuperRegs(Reserved, Reg);
  }

  // Use markSuperRegs to ensure any register aliases are also reserved.
  markSuperRegs(Reserved, RISCV::X0); // zero
  markSuperRegs(Reserved, RISCV::X2); // sp
  markSuperRegs(Reserved, RISCV::X3); // gp
  markSuperRegs(Reserved, RISCV::X4); // tp
  if (TFI->hasFP(MF))
    markSuperRegs(Reserved, RISCV::X8); // fp
  // Reserve the base register if we need to realign the stack and allocate
  // variable-sized objects at runtime.
  if (TFI->hasBP(MF))
    markSuperRegs(Reserved, RISCVABI::getBPReg()); // bp

  // Additionally reserve the dummy register used to form the register pair
  // beginning with 'x0' for instructions that take register pairs.
  markSuperRegs(Reserved, RISCV::DUMMY_REG_PAIR_WITH_X0);

  // The V configuration and status registers are not available for code
  // generation; we handle them manually.
  markSuperRegs(Reserved, RISCV::VL);
  markSuperRegs(Reserved, RISCV::VTYPE);
  markSuperRegs(Reserved, RISCV::VXSAT);
  markSuperRegs(Reserved, RISCV::VXRM);
  markSuperRegs(Reserved, RISCV::VLENB); // vlenb (constant)

  // Floating point environment registers.
  markSuperRegs(Reserved, RISCV::FRM);
  markSuperRegs(Reserved, RISCV::FFLAGS);

  assert(checkAllSuperRegsMarked(Reserved));
  return Reserved;
}

bool RISCVRegisterInfo::isAsmClobberable(const MachineFunction &MF,
                                         MCRegister PhysReg) const {
  return !MF.getSubtarget<RISCVSubtarget>().isRegisterReservedByUser(PhysReg);
}

const uint32_t *RISCVRegisterInfo::getNoPreservedMask() const {
  return CSR_NoRegs_RegMask;
}

// Frame indexes representing locations of CSRs which are given a fixed
// location by save/restore libcalls or Zcmp push/pop.
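// These negative frame indexes do not correspond to ordinary stack objects;
// hasReservedSpillSlot() below reports them so that frame lowering can resolve
// each CSR to the fixed slot used by the libcall/push-pop save area.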
static const std::pair<unsigned, int> FixedCSRFIMap[] = {
    {/*ra*/ RISCV::X1, -1},
    {/*s0*/ RISCV::X8, -2},
    {/*s1*/ RISCV::X9, -3},
    {/*s2*/ RISCV::X18, -4},
    {/*s3*/ RISCV::X19, -5},
    {/*s4*/ RISCV::X20, -6},
    {/*s5*/ RISCV::X21, -7},
    {/*s6*/ RISCV::X22, -8},
    {/*s7*/ RISCV::X23, -9},
    {/*s8*/ RISCV::X24, -10},
    {/*s9*/ RISCV::X25, -11},
    {/*s10*/ RISCV::X26, -12},
    {/*s11*/ RISCV::X27, -13}};

bool RISCVRegisterInfo::hasReservedSpillSlot(const MachineFunction &MF,
                                             Register Reg,
                                             int &FrameIdx) const {
  const auto *RVFI = MF.getInfo<RISCVMachineFunctionInfo>();
  if (!RVFI->useSaveRestoreLibCalls(MF) && !RVFI->isPushable(MF))
    return false;

  const auto *FII =
      llvm::find_if(FixedCSRFIMap, [&](auto P) { return P.first == Reg; });
  if (FII == std::end(FixedCSRFIMap))
    return false;

  FrameIdx = FII->second;
  return true;
}

void RISCVRegisterInfo::adjustReg(MachineBasicBlock &MBB,
                                  MachineBasicBlock::iterator II,
                                  const DebugLoc &DL, Register DestReg,
                                  Register SrcReg, StackOffset Offset,
                                  MachineInstr::MIFlag Flag,
                                  MaybeAlign RequiredAlign) const {

  if (DestReg == SrcReg && !Offset.getFixed() && !Offset.getScalable())
    return;

  MachineFunction &MF = *MBB.getParent();
  MachineRegisterInfo &MRI = MF.getRegInfo();
  const RISCVSubtarget &ST = MF.getSubtarget<RISCVSubtarget>();
  const RISCVInstrInfo *TII = ST.getInstrInfo();

  bool KillSrcReg = false;

  if (Offset.getScalable()) {
    unsigned ScalableAdjOpc = RISCV::ADD;
    int64_t ScalableValue = Offset.getScalable();
    if (ScalableValue < 0) {
      ScalableValue = -ScalableValue;
      ScalableAdjOpc = RISCV::SUB;
    }
    // Get vlenb and multiply it by the number of vector registers.
    Register ScratchReg = DestReg;
    if (DestReg == SrcReg)
      ScratchReg = MRI.createVirtualRegister(&RISCV::GPRRegClass);
    TII->getVLENFactoredAmount(MF, MBB, II, DL, ScratchReg, ScalableValue,
                               Flag);
    BuildMI(MBB, II, DL, TII->get(ScalableAdjOpc), DestReg)
        .addReg(SrcReg)
        .addReg(ScratchReg, RegState::Kill)
        .setMIFlag(Flag);
    SrcReg = DestReg;
    KillSrcReg = true;
  }

  int64_t Val = Offset.getFixed();
  if (DestReg == SrcReg && Val == 0)
    return;

  const uint64_t Align = RequiredAlign.valueOrOne().value();

  if (isInt<12>(Val)) {
    BuildMI(MBB, II, DL, TII->get(RISCV::ADDI), DestReg)
        .addReg(SrcReg, getKillRegState(KillSrcReg))
        .addImm(Val)
        .setMIFlag(Flag);
    return;
  }

  // Try to split the offset across two ADDIs. We need to keep the intermediate
  // result aligned after each ADDI. We need to determine the maximum value we
  // can put in each ADDI. In the negative direction, we can use -2048, which
  // is always sufficiently aligned. In the positive direction, we need to find
  // the largest 12-bit immediate that is aligned. Exclude -4096, since it can
  // be created with LUI.
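  // For example, with Val = 3000 and a required alignment of 16 (values chosen
  // purely for illustration): MaxPosAdjStep = 2048 - 16 = 2032, so we emit
  // ADDI +2032 (keeping the intermediate result 16-byte aligned, since
  // 2032 = 127 * 16) followed by ADDI +968. In the negative direction,
  // Val = -3000 splits into -2048 and -952.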
  assert(Align < 2048 && "Required alignment too large");
  int64_t MaxPosAdjStep = 2048 - Align;
  if (Val > -4096 && Val <= (2 * MaxPosAdjStep)) {
    int64_t FirstAdj = Val < 0 ? -2048 : MaxPosAdjStep;
    Val -= FirstAdj;
    BuildMI(MBB, II, DL, TII->get(RISCV::ADDI), DestReg)
        .addReg(SrcReg, getKillRegState(KillSrcReg))
        .addImm(FirstAdj)
        .setMIFlag(Flag);
    BuildMI(MBB, II, DL, TII->get(RISCV::ADDI), DestReg)
        .addReg(DestReg, RegState::Kill)
        .addImm(Val)
        .setMIFlag(Flag);
    return;
  }

  unsigned Opc = RISCV::ADD;
  if (Val < 0) {
    Val = -Val;
    Opc = RISCV::SUB;
  }

  Register ScratchReg = MRI.createVirtualRegister(&RISCV::GPRRegClass);
  TII->movImm(MBB, II, DL, ScratchReg, Val, Flag);
  BuildMI(MBB, II, DL, TII->get(Opc), DestReg)
      .addReg(SrcReg, getKillRegState(KillSrcReg))
      .addReg(ScratchReg, RegState::Kill)
      .setMIFlag(Flag);
}

// Split a VSPILLx_Mx pseudo into multiple whole register stores separated by
// LMUL*VLENB bytes.
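// For example, a PseudoVSPILL2_M2 (NF = 2, LMUL = 2) expands, schematically
// and with illustrative register names (the expansion actually runs on
// virtual registers), to:
//   csrr   t0, vlenb
//   slli   t0, t0, 1       // stride = 2 * vlenb
//   vs2r.v v4, (a0)
//   add    a1, a0, t0
//   vs2r.v v6, (a1)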
void RISCVRegisterInfo::lowerVSPILL(MachineBasicBlock::iterator II) const {
  DebugLoc DL = II->getDebugLoc();
  MachineBasicBlock &MBB = *II->getParent();
  MachineFunction &MF = *MBB.getParent();
  MachineRegisterInfo &MRI = MF.getRegInfo();
  const TargetInstrInfo *TII = MF.getSubtarget().getInstrInfo();
  const TargetRegisterInfo *TRI = MF.getSubtarget().getRegisterInfo();

  auto ZvlssegInfo = RISCV::isRVVSpillForZvlsseg(II->getOpcode());
  unsigned NF = ZvlssegInfo->first;
  unsigned LMUL = ZvlssegInfo->second;
  assert(NF * LMUL <= 8 && "Invalid NF/LMUL combinations.");
  unsigned Opcode, SubRegIdx;
  switch (LMUL) {
  default:
    llvm_unreachable("LMUL must be 1, 2, or 4.");
  case 1:
    Opcode = RISCV::VS1R_V;
    SubRegIdx = RISCV::sub_vrm1_0;
    break;
  case 2:
    Opcode = RISCV::VS2R_V;
    SubRegIdx = RISCV::sub_vrm2_0;
    break;
  case 4:
    Opcode = RISCV::VS4R_V;
    SubRegIdx = RISCV::sub_vrm4_0;
    break;
  }
  static_assert(RISCV::sub_vrm1_7 == RISCV::sub_vrm1_0 + 7,
                "Unexpected subreg numbering");
  static_assert(RISCV::sub_vrm2_3 == RISCV::sub_vrm2_0 + 3,
                "Unexpected subreg numbering");
  static_assert(RISCV::sub_vrm4_1 == RISCV::sub_vrm4_0 + 1,
                "Unexpected subreg numbering");

  Register VL = MRI.createVirtualRegister(&RISCV::GPRRegClass);
  BuildMI(MBB, II, DL, TII->get(RISCV::PseudoReadVLENB), VL);
  uint32_t ShiftAmount = Log2_32(LMUL);
  if (ShiftAmount != 0)
    BuildMI(MBB, II, DL, TII->get(RISCV::SLLI), VL)
        .addReg(VL)
        .addImm(ShiftAmount);

  Register SrcReg = II->getOperand(0).getReg();
  Register Base = II->getOperand(1).getReg();
  bool IsBaseKill = II->getOperand(1).isKill();
  Register NewBase = MRI.createVirtualRegister(&RISCV::GPRRegClass);
  for (unsigned I = 0; I < NF; ++I) {
    // Add an implicit use of the super register to describe that we are using
    // part of it. This prevents the machine verifier from complaining when
    // part of a subreg is undef; see the comment in
    // MachineVerifier::checkLiveness for more detail.
    BuildMI(MBB, II, DL, TII->get(Opcode))
        .addReg(TRI->getSubReg(SrcReg, SubRegIdx + I))
        .addReg(Base, getKillRegState(I == NF - 1))
        .addMemOperand(*(II->memoperands_begin()))
        .addReg(SrcReg, RegState::Implicit);
    if (I != NF - 1)
      BuildMI(MBB, II, DL, TII->get(RISCV::ADD), NewBase)
          .addReg(Base, getKillRegState(I != 0 || IsBaseKill))
          .addReg(VL, getKillRegState(I == NF - 2));
    Base = NewBase;
  }
  II->eraseFromParent();
}

// Split a VRELOADx_Mx pseudo into multiple whole register loads separated by
// LMUL*VLENB bytes.
void RISCVRegisterInfo::lowerVRELOAD(MachineBasicBlock::iterator II) const {
  DebugLoc DL = II->getDebugLoc();
  MachineBasicBlock &MBB = *II->getParent();
  MachineFunction &MF = *MBB.getParent();
  MachineRegisterInfo &MRI = MF.getRegInfo();
  const TargetInstrInfo *TII = MF.getSubtarget().getInstrInfo();
  const TargetRegisterInfo *TRI = MF.getSubtarget().getRegisterInfo();

  auto ZvlssegInfo = RISCV::isRVVSpillForZvlsseg(II->getOpcode());
  unsigned NF = ZvlssegInfo->first;
  unsigned LMUL = ZvlssegInfo->second;
  assert(NF * LMUL <= 8 && "Invalid NF/LMUL combinations.");
  unsigned Opcode, SubRegIdx;
  switch (LMUL) {
  default:
    llvm_unreachable("LMUL must be 1, 2, or 4.");
  case 1:
    Opcode = RISCV::VL1RE8_V;
    SubRegIdx = RISCV::sub_vrm1_0;
    break;
  case 2:
    Opcode = RISCV::VL2RE8_V;
    SubRegIdx = RISCV::sub_vrm2_0;
    break;
  case 4:
    Opcode = RISCV::VL4RE8_V;
    SubRegIdx = RISCV::sub_vrm4_0;
    break;
  }
  static_assert(RISCV::sub_vrm1_7 == RISCV::sub_vrm1_0 + 7,
                "Unexpected subreg numbering");
  static_assert(RISCV::sub_vrm2_3 == RISCV::sub_vrm2_0 + 3,
                "Unexpected subreg numbering");
  static_assert(RISCV::sub_vrm4_1 == RISCV::sub_vrm4_0 + 1,
                "Unexpected subreg numbering");

  Register VL = MRI.createVirtualRegister(&RISCV::GPRRegClass);
  BuildMI(MBB, II, DL, TII->get(RISCV::PseudoReadVLENB), VL);
  uint32_t ShiftAmount = Log2_32(LMUL);
  if (ShiftAmount != 0)
    BuildMI(MBB, II, DL, TII->get(RISCV::SLLI), VL)
        .addReg(VL)
        .addImm(ShiftAmount);

  Register DestReg = II->getOperand(0).getReg();
  Register Base = II->getOperand(1).getReg();
  bool IsBaseKill = II->getOperand(1).isKill();
  Register NewBase = MRI.createVirtualRegister(&RISCV::GPRRegClass);
  for (unsigned I = 0; I < NF; ++I) {
    BuildMI(MBB, II, DL, TII->get(Opcode),
            TRI->getSubReg(DestReg, SubRegIdx + I))
        .addReg(Base, getKillRegState(I == NF - 1))
        .addMemOperand(*(II->memoperands_begin()));
    if (I != NF - 1)
      BuildMI(MBB, II, DL, TII->get(RISCV::ADD), NewBase)
          .addReg(Base, getKillRegState(I != 0 || IsBaseKill))
          .addReg(VL, getKillRegState(I == NF - 2));
    Base = NewBase;
  }
  II->eraseFromParent();
}

bool RISCVRegisterInfo::eliminateFrameIndex(MachineBasicBlock::iterator II,
                                            int SPAdj, unsigned FIOperandNum,
                                            RegScavenger *RS) const {
  assert(SPAdj == 0 && "Unexpected non-zero SPAdj value");

  MachineInstr &MI = *II;
  MachineFunction &MF = *MI.getParent()->getParent();
  MachineRegisterInfo &MRI = MF.getRegInfo();
  const RISCVSubtarget &ST = MF.getSubtarget<RISCVSubtarget>();
  DebugLoc DL = MI.getDebugLoc();

  int FrameIndex = MI.getOperand(FIOperandNum).getIndex();
  Register FrameReg;
  StackOffset Offset =
      getFrameLowering(MF)->getFrameIndexReference(MF, FrameIndex, FrameReg);
  bool IsRVVSpill = RISCV::isRVVSpill(MI);
  if (!IsRVVSpill)
    Offset += StackOffset::getFixed(MI.getOperand(FIOperandNum + 1).getImm());

  if (Offset.getScalable() && ST.getRealMinVLen() == ST.getRealMaxVLen()) {
    // For an exact VLEN value, scalable offsets become constant and thus
    // can be converted entirely into fixed offsets.
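    // For example, if VLEN is known to be exactly 512, then vlenb = 64, and a
    // scalable offset of 16 (i.e. two whole vector registers, at 8 units per
    // register) folds to the fixed offset 2 * 64 = 128 bytes.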
    int64_t FixedValue = Offset.getFixed();
    int64_t ScalableValue = Offset.getScalable();
    assert(ScalableValue % 8 == 0 &&
           "Scalable offset is not a multiple of a single vector size.");
    int64_t NumOfVReg = ScalableValue / 8;
    int64_t VLENB = ST.getRealMinVLen() / 8;
    Offset = StackOffset::getFixed(FixedValue + NumOfVReg * VLENB);
  }

  if (!isInt<32>(Offset.getFixed())) {
    report_fatal_error(
        "Frame offsets outside of the signed 32-bit range not supported");
  }

  if (!IsRVVSpill) {
    if (MI.getOpcode() == RISCV::ADDI && !isInt<12>(Offset.getFixed())) {
      // We chose to emit the canonical immediate sequence rather than folding
      // the offset into the using add under the theory that doing so doesn't
      // save dynamic instruction count and some target may fuse the canonical
      // 32 bit immediate sequence. We still need to clear the portion of the
      // offset encoded in the immediate.
      MI.getOperand(FIOperandNum + 1).ChangeToImmediate(0);
    } else {
      // We can encode an add with a 12-bit signed immediate in the immediate
      // operand of our user instruction. As a result, the remaining offset
      // can, by construction, be materialized with at worst a LUI and an ADD.
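      // For example, a fixed offset of 6144 (0x1800) yields
      // Lo12 = SignExtend64<12>(0x800) = -2048; the user instruction encodes
      // -2048 and the remaining 6144 - (-2048) = 8192 = 0x2000 is exactly a
      // LUI-materializable amount.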
      int64_t Val = Offset.getFixed();
      int64_t Lo12 = SignExtend64<12>(Val);
      MI.getOperand(FIOperandNum + 1).ChangeToImmediate(Lo12);
      Offset = StackOffset::get((uint64_t)Val - (uint64_t)Lo12,
                                Offset.getScalable());
    }
  }

  if (Offset.getScalable() || Offset.getFixed()) {
    Register DestReg;
    if (MI.getOpcode() == RISCV::ADDI)
      DestReg = MI.getOperand(0).getReg();
    else
      DestReg = MRI.createVirtualRegister(&RISCV::GPRRegClass);
    adjustReg(*II->getParent(), II, DL, DestReg, FrameReg, Offset,
              MachineInstr::NoFlags, std::nullopt);
    MI.getOperand(FIOperandNum).ChangeToRegister(DestReg, /*IsDef*/false,
                                                 /*IsImp*/false,
                                                 /*IsKill*/true);
  } else {
    MI.getOperand(FIOperandNum).ChangeToRegister(FrameReg, /*IsDef*/false,
                                                 /*IsImp*/false,
                                                 /*IsKill*/false);
  }

  // If, after materializing the adjustment, we have a pointless ADDI, remove
  // it.
  if (MI.getOpcode() == RISCV::ADDI &&
      MI.getOperand(0).getReg() == MI.getOperand(1).getReg() &&
      MI.getOperand(2).getImm() == 0) {
    MI.eraseFromParent();
    return true;
  }

  // Handle spill/fill of synthetic register classes for segment operations to
  // ensure correctness in the edge case one gets spilled. There are many
  // possible optimizations here, but given the extreme rarity of such spills,
  // we prefer simplicity of implementation for now.
  switch (MI.getOpcode()) {
  case RISCV::PseudoVSPILL2_M1:
  case RISCV::PseudoVSPILL2_M2:
  case RISCV::PseudoVSPILL2_M4:
  case RISCV::PseudoVSPILL3_M1:
  case RISCV::PseudoVSPILL3_M2:
  case RISCV::PseudoVSPILL4_M1:
  case RISCV::PseudoVSPILL4_M2:
  case RISCV::PseudoVSPILL5_M1:
  case RISCV::PseudoVSPILL6_M1:
  case RISCV::PseudoVSPILL7_M1:
  case RISCV::PseudoVSPILL8_M1:
    lowerVSPILL(II);
    return true;
  case RISCV::PseudoVRELOAD2_M1:
  case RISCV::PseudoVRELOAD2_M2:
  case RISCV::PseudoVRELOAD2_M4:
  case RISCV::PseudoVRELOAD3_M1:
  case RISCV::PseudoVRELOAD3_M2:
  case RISCV::PseudoVRELOAD4_M1:
  case RISCV::PseudoVRELOAD4_M2:
  case RISCV::PseudoVRELOAD5_M1:
  case RISCV::PseudoVRELOAD6_M1:
  case RISCV::PseudoVRELOAD7_M1:
  case RISCV::PseudoVRELOAD8_M1:
    lowerVRELOAD(II);
    return true;
  }

  return false;
}

bool RISCVRegisterInfo::requiresVirtualBaseRegisters(
    const MachineFunction &MF) const {
  return true;
}

// Returns true if the instruction's frame index reference would be better
// served by a base register other than FP or SP.
// Used by the LocalStackSlotAllocation pass to determine which frame index
// references it should create new base registers for.
bool RISCVRegisterInfo::needsFrameBaseReg(MachineInstr *MI,
                                          int64_t Offset) const {
  unsigned FIOperandNum = 0;
  for (; !MI->getOperand(FIOperandNum).isFI(); FIOperandNum++)
    assert(FIOperandNum < MI->getNumOperands() &&
           "Instr doesn't have FrameIndex operand");

  // For RISC-V, the machine instructions that include a FrameIndex operand
  // are loads, stores, and ADDI.
  unsigned MIFrm = RISCVII::getFormat(MI->getDesc().TSFlags);
  if (MIFrm != RISCVII::InstFormatI && MIFrm != RISCVII::InstFormatS)
    return false;
  // We only generate virtual base registers for loads and stores, so
  // return false for everything else.
  if (!MI->mayLoad() && !MI->mayStore())
    return false;

  const MachineFunction &MF = *MI->getMF();
  const MachineFrameInfo &MFI = MF.getFrameInfo();
  const RISCVFrameLowering *TFI = getFrameLowering(MF);
  const MachineRegisterInfo &MRI = MF.getRegInfo();
  unsigned CalleeSavedSize = 0;
  Offset += getFrameIndexInstrOffset(MI, FIOperandNum);

  // Estimate the stack size used to store callee saved registers
  // (excluding reserved registers).
  BitVector ReservedRegs = getReservedRegs(MF);
  for (const MCPhysReg *R = MRI.getCalleeSavedRegs(); MCPhysReg Reg = *R; ++R) {
    if (!ReservedRegs.test(Reg))
      CalleeSavedSize += getSpillSize(*getMinimalPhysRegClass(Reg));
  }

  int64_t MaxFPOffset = Offset - CalleeSavedSize;
  if (TFI->hasFP(MF) && !shouldRealignStack(MF))
    return !isFrameOffsetLegal(MI, RISCV::X8, MaxFPOffset);

  // Assume 128 bytes of spill slots to estimate the maximum possible
  // offset relative to the stack pointer.
  // FIXME: The 128 is copied from ARM. We should run some statistics and pick
  // a real one for RISC-V.
  int64_t MaxSPOffset = Offset + 128;
  MaxSPOffset += MFI.getLocalFrameSize();
  return !isFrameOffsetLegal(MI, RISCV::X2, MaxSPOffset);
}

// Determine whether a given base register plus an offset immediate is
// encodable to resolve a frame index.
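// For RISC-V loads, stores, and ADDI, that means the combined offset must fit
// in the 12-bit signed immediate field, i.e. lie in [-2048, 2047].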
bool RISCVRegisterInfo::isFrameOffsetLegal(const MachineInstr *MI,
                                           Register BaseReg,
                                           int64_t Offset) const {
  unsigned FIOperandNum = 0;
  while (!MI->getOperand(FIOperandNum).isFI()) {
    FIOperandNum++;
    assert(FIOperandNum < MI->getNumOperands() &&
           "Instr does not have a FrameIndex operand!");
  }

  Offset += getFrameIndexInstrOffset(MI, FIOperandNum);
  return isInt<12>(Offset);
}

// Insert defining instruction(s) for a pointer to FrameIdx before
// insertion point I.
// Returns the materialized base register.
Register RISCVRegisterInfo::materializeFrameBaseRegister(MachineBasicBlock *MBB,
                                                         int FrameIdx,
                                                         int64_t Offset) const {
  MachineBasicBlock::iterator MBBI = MBB->begin();
  DebugLoc DL;
  if (MBBI != MBB->end())
    DL = MBBI->getDebugLoc();
  MachineFunction *MF = MBB->getParent();
  MachineRegisterInfo &MRI = MF->getRegInfo();
  const TargetInstrInfo *TII = MF->getSubtarget().getInstrInfo();

  Register BaseReg = MRI.createVirtualRegister(&RISCV::GPRRegClass);
  BuildMI(*MBB, MBBI, DL, TII->get(RISCV::ADDI), BaseReg)
      .addFrameIndex(FrameIdx)
      .addImm(Offset);
  return BaseReg;
}

// Resolve a frame index operand of an instruction to reference the
// indicated base register plus offset instead.
void RISCVRegisterInfo::resolveFrameIndex(MachineInstr &MI, Register BaseReg,
                                          int64_t Offset) const {
  unsigned FIOperandNum = 0;
  while (!MI.getOperand(FIOperandNum).isFI()) {
    FIOperandNum++;
    assert(FIOperandNum < MI.getNumOperands() &&
           "Instr does not have a FrameIndex operand!");
  }

  Offset += getFrameIndexInstrOffset(&MI, FIOperandNum);
  // FrameIndex operands are always represented as a
  // register followed by an immediate.
  MI.getOperand(FIOperandNum).ChangeToRegister(BaseReg, false);
  MI.getOperand(FIOperandNum + 1).ChangeToImmediate(Offset);
}

// Get the offset from the referenced frame index in the instruction,
// if there is one.
int64_t RISCVRegisterInfo::getFrameIndexInstrOffset(const MachineInstr *MI,
                                                    int Idx) const {
  assert((RISCVII::getFormat(MI->getDesc().TSFlags) == RISCVII::InstFormatI ||
          RISCVII::getFormat(MI->getDesc().TSFlags) == RISCVII::InstFormatS) &&
         "The MI must be I or S format.");
  assert(MI->getOperand(Idx).isFI() && "The Idx'th operand of MI is not a "
                                       "FrameIndex operand");
  return MI->getOperand(Idx + 1).getImm();
}

Register RISCVRegisterInfo::getFrameRegister(const MachineFunction &MF) const {
  const TargetFrameLowering *TFI = getFrameLowering(MF);
  return TFI->hasFP(MF) ? RISCV::X8 : RISCV::X2;
}

const uint32_t *
RISCVRegisterInfo::getCallPreservedMask(const MachineFunction &MF,
                                        CallingConv::ID CC) const {
  auto &Subtarget = MF.getSubtarget<RISCVSubtarget>();

  if (CC == CallingConv::GHC)
    return CSR_NoRegs_RegMask;
  switch (Subtarget.getTargetABI()) {
  default:
    llvm_unreachable("Unrecognized ABI");
  case RISCVABI::ABI_ILP32:
  case RISCVABI::ABI_LP64:
    return CSR_ILP32_LP64_RegMask;
  case RISCVABI::ABI_ILP32F:
  case RISCVABI::ABI_LP64F:
    return CSR_ILP32F_LP64F_RegMask;
  case RISCVABI::ABI_ILP32D:
  case RISCVABI::ABI_LP64D:
    return CSR_ILP32D_LP64D_RegMask;
  }
}

const TargetRegisterClass *
RISCVRegisterInfo::getLargestLegalSuperClass(const TargetRegisterClass *RC,
                                             const MachineFunction &) const {
  if (RC == &RISCV::VMV0RegClass)
    return &RISCV::VRRegClass;
  return RC;
}

void RISCVRegisterInfo::getOffsetOpcodes(const StackOffset &Offset,
                                         SmallVectorImpl<uint64_t> &Ops) const {
  // VLENB is the length of a vector register in bytes. We use
  // <vscale x 8 x i8> to represent one vector register. The dwarf offset is
  // VLENB * scalable_offset / 8.
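  // For example, a scalable offset of +16 (two vector registers) appends:
  //   DW_OP_constu 2, DW_OP_bregx vlenb 0, DW_OP_mul, DW_OP_plus
  // which computes base + 2 * VLENB.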
  assert(Offset.getScalable() % 8 == 0 && "Invalid frame offset");

  // Add fixed-sized offset using existing DIExpression interface.
  DIExpression::appendOffset(Ops, Offset.getFixed());

  unsigned VLENB = getDwarfRegNum(RISCV::VLENB, true);
  int64_t VLENBSized = Offset.getScalable() / 8;
  if (VLENBSized > 0) {
    Ops.push_back(dwarf::DW_OP_constu);
    Ops.push_back(VLENBSized);
    Ops.append({dwarf::DW_OP_bregx, VLENB, 0ULL});
    Ops.push_back(dwarf::DW_OP_mul);
    Ops.push_back(dwarf::DW_OP_plus);
  } else if (VLENBSized < 0) {
    Ops.push_back(dwarf::DW_OP_constu);
    Ops.push_back(-VLENBSized);
    Ops.append({dwarf::DW_OP_bregx, VLENB, 0ULL});
    Ops.push_back(dwarf::DW_OP_mul);
    Ops.push_back(dwarf::DW_OP_minus);
  }
}

unsigned
RISCVRegisterInfo::getRegisterCostTableIndex(const MachineFunction &MF) const {
  return MF.getSubtarget<RISCVSubtarget>().hasStdExtCOrZca() ? 1 : 0;
}

// Add two-address hints to improve the chances of being able to use a
// compressed instruction.
bool RISCVRegisterInfo::getRegAllocationHints(
    Register VirtReg, ArrayRef<MCPhysReg> Order,
    SmallVectorImpl<MCPhysReg> &Hints, const MachineFunction &MF,
    const VirtRegMap *VRM, const LiveRegMatrix *Matrix) const {
  const MachineRegisterInfo *MRI = &MF.getRegInfo();

  bool BaseImplRetVal = TargetRegisterInfo::getRegAllocationHints(
      VirtReg, Order, Hints, MF, VRM, Matrix);

  if (!VRM || DisableRegAllocHints)
    return BaseImplRetVal;

  // Add any two-address hints after any copy hints.
  SmallSet<Register, 4> TwoAddrHints;

  auto tryAddHint = [&](const MachineOperand &VRRegMO, const MachineOperand &MO,
                        bool NeedGPRC) -> void {
    Register Reg = MO.getReg();
    Register PhysReg = Reg.isPhysical() ? Reg : Register(VRM->getPhys(Reg));
    if (PhysReg && (!NeedGPRC || RISCV::GPRCRegClass.contains(PhysReg))) {
      assert(!MO.getSubReg() && !VRRegMO.getSubReg() && "Unexpected subreg!");
      if (!MRI->isReserved(PhysReg) && !is_contained(Hints, PhysReg))
        TwoAddrHints.insert(PhysReg);
    }
  };

  // These are all of the compressible binary instructions. If an instruction
  // needs GPRC register class operands, \p NeedGPRC will be set to true.
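  // For example, 'and a5, a5, a4' can be compressed to 'c.and a5, a4' only
  // when the destination is tied to the first source and both registers are
  // in the GPRC class (x8-x15); hinting the tied physical register makes that
  // form more likely.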
  auto isCompressible = [](const MachineInstr &MI, bool &NeedGPRC) {
    NeedGPRC = false;
    switch (MI.getOpcode()) {
    default:
      return false;
    case RISCV::AND:
    case RISCV::OR:
    case RISCV::XOR:
    case RISCV::SUB:
    case RISCV::ADDW:
    case RISCV::SUBW:
      NeedGPRC = true;
      return true;
    case RISCV::ANDI:
      NeedGPRC = true;
      return MI.getOperand(2).isImm() && isInt<6>(MI.getOperand(2).getImm());
    case RISCV::SRAI:
    case RISCV::SRLI:
      NeedGPRC = true;
      return true;
    case RISCV::ADD:
    case RISCV::SLLI:
      return true;
    case RISCV::ADDI:
    case RISCV::ADDIW:
      return MI.getOperand(2).isImm() && isInt<6>(MI.getOperand(2).getImm());
    }
  };

  // Returns true if this operand is compressible. For non-registers it always
  // returns true. Immediate range was already checked in isCompressible.
  // For registers, it checks if the register is a GPRC register. Reg-reg
  // instructions that require GPRC need all register operands to be GPRC.
  auto isCompressibleOpnd = [&](const MachineOperand &MO) {
    if (!MO.isReg())
      return true;
    Register Reg = MO.getReg();
    Register PhysReg = Reg.isPhysical() ? Reg : Register(VRM->getPhys(Reg));
    return PhysReg && RISCV::GPRCRegClass.contains(PhysReg);
  };

  for (auto &MO : MRI->reg_nodbg_operands(VirtReg)) {
    const MachineInstr &MI = *MO.getParent();
    unsigned OpIdx = MO.getOperandNo();
    bool NeedGPRC;
    if (isCompressible(MI, NeedGPRC)) {
      if (OpIdx == 0 && MI.getOperand(1).isReg()) {
        if (!NeedGPRC || isCompressibleOpnd(MI.getOperand(2)))
          tryAddHint(MO, MI.getOperand(1), NeedGPRC);
        if (MI.isCommutable() && MI.getOperand(2).isReg() &&
            (!NeedGPRC || isCompressibleOpnd(MI.getOperand(1))))
          tryAddHint(MO, MI.getOperand(2), NeedGPRC);
      } else if (OpIdx == 1 &&
                 (!NeedGPRC || isCompressibleOpnd(MI.getOperand(2)))) {
        tryAddHint(MO, MI.getOperand(0), NeedGPRC);
      } else if (MI.isCommutable() && OpIdx == 2 &&
                 (!NeedGPRC || isCompressibleOpnd(MI.getOperand(1)))) {
        tryAddHint(MO, MI.getOperand(0), NeedGPRC);
      }
    }
  }

  for (MCPhysReg OrderReg : Order)
    if (TwoAddrHints.count(OrderReg))
      Hints.push_back(OrderReg);

  return BaseImplRetVal;
}