//===-- RISCVRegisterInfo.cpp - RISCV Register Information ------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file contains the RISCV implementation of the TargetRegisterInfo class.
//
//===----------------------------------------------------------------------===//

#include "RISCVRegisterInfo.h"
#include "RISCV.h"
#include "RISCVMachineFunctionInfo.h"
#include "RISCVSubtarget.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/RegisterScavenging.h"
#include "llvm/CodeGen/TargetFrameLowering.h"
#include "llvm/CodeGen/TargetInstrInfo.h"
#include "llvm/Support/ErrorHandling.h"
#include <map>

#define GET_REGINFO_TARGET_DESC
#include "RISCVGenRegisterInfo.inc"

using namespace llvm;

static_assert(RISCV::X1 == RISCV::X0 + 1, "Register list not consecutive");
static_assert(RISCV::X31 == RISCV::X0 + 31, "Register list not consecutive");
static_assert(RISCV::F1_H == RISCV::F0_H + 1, "Register list not consecutive");
static_assert(RISCV::F31_H == RISCV::F0_H + 31,
              "Register list not consecutive");
static_assert(RISCV::F1_F == RISCV::F0_F + 1, "Register list not consecutive");
static_assert(RISCV::F31_F == RISCV::F0_F + 31,
              "Register list not consecutive");
static_assert(RISCV::F1_D == RISCV::F0_D + 1, "Register list not consecutive");
static_assert(RISCV::F31_D == RISCV::F0_D + 31,
              "Register list not consecutive");
static_assert(RISCV::V1 == RISCV::V0 + 1, "Register list not consecutive");
static_assert(RISCV::V31 == RISCV::V0 + 31, "Register list not consecutive");

RISCVRegisterInfo::RISCVRegisterInfo(unsigned HwMode)
    : RISCVGenRegisterInfo(RISCV::X1, /*DwarfFlavour*/0, /*EHFlavor*/0,
                           /*PC*/0, HwMode) {}

const MCPhysReg *
RISCVRegisterInfo::getCalleeSavedRegs(const MachineFunction *MF) const {
  auto &Subtarget = MF->getSubtarget<RISCVSubtarget>();
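  // The GHC calling convention does not preserve any registers across calls,
  // so there are no callee-saved registers to report.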
  if (MF->getFunction().getCallingConv() == CallingConv::GHC)
    return CSR_NoRegs_SaveList;
  if (MF->getFunction().hasFnAttribute("interrupt")) {
    if (Subtarget.hasStdExtD())
      return CSR_XLEN_F64_Interrupt_SaveList;
    if (Subtarget.hasStdExtF())
      return CSR_XLEN_F32_Interrupt_SaveList;
    return CSR_Interrupt_SaveList;
  }

  switch (Subtarget.getTargetABI()) {
  default:
    llvm_unreachable("Unrecognized ABI");
  case RISCVABI::ABI_ILP32:
  case RISCVABI::ABI_LP64:
    return CSR_ILP32_LP64_SaveList;
  case RISCVABI::ABI_ILP32F:
  case RISCVABI::ABI_LP64F:
    return CSR_ILP32F_LP64F_SaveList;
  case RISCVABI::ABI_ILP32D:
  case RISCVABI::ABI_LP64D:
    return CSR_ILP32D_LP64D_SaveList;
  }
}

BitVector RISCVRegisterInfo::getReservedRegs(const MachineFunction &MF) const {
  const RISCVFrameLowering *TFI = getFrameLowering(MF);
  BitVector Reserved(getNumRegs());

  // Mark any registers requested to be reserved as such
  for (size_t Reg = 0; Reg < getNumRegs(); Reg++) {
    if (MF.getSubtarget<RISCVSubtarget>().isRegisterReservedByUser(Reg))
      markSuperRegs(Reserved, Reg);
  }

  // Use markSuperRegs to ensure any register aliases are also reserved
  markSuperRegs(Reserved, RISCV::X0); // zero
  markSuperRegs(Reserved, RISCV::X2); // sp
  markSuperRegs(Reserved, RISCV::X3); // gp
  markSuperRegs(Reserved, RISCV::X4); // tp
  if (TFI->hasFP(MF))
    markSuperRegs(Reserved, RISCV::X8); // fp
  // Reserve the base register if we need to realign the stack and allocate
  // variable-sized objects at runtime.
  if (TFI->hasBP(MF))
    markSuperRegs(Reserved, RISCVABI::getBPReg()); // bp

  // V registers for code generation. We handle them manually.
  markSuperRegs(Reserved, RISCV::VL);
  markSuperRegs(Reserved, RISCV::VTYPE);
  markSuperRegs(Reserved, RISCV::VXSAT);
  markSuperRegs(Reserved, RISCV::VXRM);

  // Floating point environment registers.
  markSuperRegs(Reserved, RISCV::FRM);
  markSuperRegs(Reserved, RISCV::FFLAGS);
  markSuperRegs(Reserved, RISCV::FCSR);

  assert(checkAllSuperRegsMarked(Reserved));
  return Reserved;
}

bool RISCVRegisterInfo::isAsmClobberable(const MachineFunction &MF,
                                         MCRegister PhysReg) const {
  return !MF.getSubtarget<RISCVSubtarget>().isRegisterReservedByUser(PhysReg);
}

bool RISCVRegisterInfo::isConstantPhysReg(MCRegister PhysReg) const {
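  // X0 is hard-wired to zero, so it always reads as a constant value.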
  return PhysReg == RISCV::X0;
}

const uint32_t *RISCVRegisterInfo::getNoPreservedMask() const {
  return CSR_NoRegs_RegMask;
}

// Frame indexes representing locations of CSRs which are given a fixed location
// by save/restore libcalls.
static const std::map<unsigned, int> FixedCSRFIMap = {
  {/*ra*/  RISCV::X1,   -1},
  {/*s0*/  RISCV::X8,   -2},
  {/*s1*/  RISCV::X9,   -3},
  {/*s2*/  RISCV::X18,  -4},
  {/*s3*/  RISCV::X19,  -5},
  {/*s4*/  RISCV::X20,  -6},
  {/*s5*/  RISCV::X21,  -7},
  {/*s6*/  RISCV::X22,  -8},
  {/*s7*/  RISCV::X23,  -9},
  {/*s8*/  RISCV::X24,  -10},
  {/*s9*/  RISCV::X25,  -11},
  {/*s10*/ RISCV::X26,  -12},
  {/*s11*/ RISCV::X27,  -13}
};

bool RISCVRegisterInfo::hasReservedSpillSlot(const MachineFunction &MF,
                                             Register Reg,
                                             int &FrameIdx) const {
  const auto *RVFI = MF.getInfo<RISCVMachineFunctionInfo>();
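  // Only the save/restore libcalls place these CSRs at fixed locations;
  // otherwise frame lowering allocates ordinary spill slots for them.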
  if (!RVFI->useSaveRestoreLibCalls(MF))
    return false;

  auto FII = FixedCSRFIMap.find(Reg);
  if (FII == FixedCSRFIMap.end())
    return false;

  FrameIdx = FII->second;
  return true;
}

void RISCVRegisterInfo::eliminateFrameIndex(MachineBasicBlock::iterator II,
                                            int SPAdj, unsigned FIOperandNum,
                                            RegScavenger *RS) const {
  assert(SPAdj == 0 && "Unexpected non-zero SPAdj value");

  MachineInstr &MI = *II;
  MachineFunction &MF = *MI.getParent()->getParent();
  MachineRegisterInfo &MRI = MF.getRegInfo();
  const RISCVInstrInfo *TII = MF.getSubtarget<RISCVSubtarget>().getInstrInfo();
  DebugLoc DL = MI.getDebugLoc();

  int FrameIndex = MI.getOperand(FIOperandNum).getIndex();
  Register FrameReg;
  StackOffset Offset =
      getFrameLowering(MF)->getFrameIndexReference(MF, FrameIndex, FrameReg);
  bool IsRVVSpill = TII->isRVVSpill(MI, /*CheckFIs*/ false);
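  // RVV spill/fill pseudos carry no immediate offset operand, so only fold the
  // instruction's immediate into the offset for non-RVV accesses.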
  if (!IsRVVSpill)
    Offset += StackOffset::getFixed(MI.getOperand(FIOperandNum + 1).getImm());

  if (!isInt<32>(Offset.getFixed())) {
    report_fatal_error(
        "Frame offsets outside of the signed 32-bit range not supported");
  }

  MachineBasicBlock &MBB = *MI.getParent();
  bool FrameRegIsKill = false;

  // If required, pre-compute the scalable factor amount which will be used in
  // later offset computation. Since this sequence requires up to two scratch
  // registers -- after which one is made free -- this grants us better
  // scavenging of scratch registers as only up to two are live at one time,
  // rather than three.
  Register ScalableFactorRegister;
  unsigned ScalableAdjOpc = RISCV::ADD;
  if (Offset.getScalable()) {
    int64_t ScalableValue = Offset.getScalable();
    if (ScalableValue < 0) {
      ScalableValue = -ScalableValue;
      ScalableAdjOpc = RISCV::SUB;
    }
    // 1. Get vlenb and multiply vlen by the number of vector registers.
    ScalableFactorRegister =
        TII->getVLENFactoredAmount(MF, MBB, II, DL, ScalableValue);
  }

  if (!isInt<12>(Offset.getFixed())) {
    // The offset won't fit in an immediate, so use a scratch register instead.
    // Modify Offset and FrameReg appropriately.
    Register ScratchReg = MRI.createVirtualRegister(&RISCV::GPRRegClass);
    TII->movImm(MBB, II, DL, ScratchReg, Offset.getFixed());
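    // If this is a plain ADDI with no scalable component, write the sum
    // directly into the ADDI's destination register and drop the original
    // instruction.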
    if (MI.getOpcode() == RISCV::ADDI && !Offset.getScalable()) {
      BuildMI(MBB, II, DL, TII->get(RISCV::ADD), MI.getOperand(0).getReg())
        .addReg(FrameReg)
        .addReg(ScratchReg, RegState::Kill);
      MI.eraseFromParent();
      return;
    }
    BuildMI(MBB, II, DL, TII->get(RISCV::ADD), ScratchReg)
        .addReg(FrameReg)
        .addReg(ScratchReg, RegState::Kill);
    Offset = StackOffset::get(0, Offset.getScalable());
    FrameReg = ScratchReg;
    FrameRegIsKill = true;
  }
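  // From here on, any remaining fixed offset is known to fit in a signed
  // 12-bit immediate.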

  if (!Offset.getScalable()) {
    // Offset = (fixed offset, 0)
    MI.getOperand(FIOperandNum)
        .ChangeToRegister(FrameReg, false, false, FrameRegIsKill);
    if (!IsRVVSpill)
      MI.getOperand(FIOperandNum + 1).ChangeToImmediate(Offset.getFixed());
    else {
      if (Offset.getFixed()) {
        Register ScratchReg = MRI.createVirtualRegister(&RISCV::GPRRegClass);
        BuildMI(MBB, II, DL, TII->get(RISCV::ADDI), ScratchReg)
          .addReg(FrameReg, getKillRegState(FrameRegIsKill))
          .addImm(Offset.getFixed());
        MI.getOperand(FIOperandNum)
          .ChangeToRegister(ScratchReg, false, false, true);
      }
    }
  } else {
    // Offset = (fixed offset, scalable offset)
    // Step 1, the scalable offset, has already been computed.
    assert(ScalableFactorRegister &&
           "Expected pre-computation of scalable factor in earlier step");

    // 2. Calculate address: FrameReg + result of multiply
    if (MI.getOpcode() == RISCV::ADDI && !Offset.getFixed()) {
      BuildMI(MBB, II, DL, TII->get(ScalableAdjOpc), MI.getOperand(0).getReg())
          .addReg(FrameReg, getKillRegState(FrameRegIsKill))
          .addReg(ScalableFactorRegister, RegState::Kill);
      MI.eraseFromParent();
      return;
    }
    Register VL = MRI.createVirtualRegister(&RISCV::GPRRegClass);
    BuildMI(MBB, II, DL, TII->get(ScalableAdjOpc), VL)
        .addReg(FrameReg, getKillRegState(FrameRegIsKill))
        .addReg(ScalableFactorRegister, RegState::Kill);

    if (IsRVVSpill && Offset.getFixed()) {
      // Scalable load/store has no immediate argument. We need to add the
      // fixed part into the load/store base address.
      BuildMI(MBB, II, DL, TII->get(RISCV::ADDI), VL)
          .addReg(VL)
          .addImm(Offset.getFixed());
    }

    // 3. Replace address register with calculated address register
    MI.getOperand(FIOperandNum).ChangeToRegister(VL, false, false, true);
    if (!IsRVVSpill)
      MI.getOperand(FIOperandNum + 1).ChangeToImmediate(Offset.getFixed());
  }

  auto ZvlssegInfo = TII->isRVVSpillForZvlsseg(MI.getOpcode());
  if (ZvlssegInfo) {
    Register VL = MRI.createVirtualRegister(&RISCV::GPRRegClass);
    BuildMI(MBB, II, DL, TII->get(RISCV::PseudoReadVLENB), VL);
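    // ZvlssegInfo->second holds the register-group multiplier of a single
    // field, so scaling VLENB by it gives the size in bytes of one field of
    // the segment type (e.g. the size of vint32m2_t for vint32m2x2_t).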
    uint32_t ShiftAmount = Log2_32(ZvlssegInfo->second);
    if (ShiftAmount != 0)
      BuildMI(MBB, II, DL, TII->get(RISCV::SLLI), VL)
          .addReg(VL)
          .addImm(ShiftAmount);
    // The last argument of pseudo spilling opcode for zvlsseg is the length of
    // one element of zvlsseg types. For example, for vint32m2x2_t, it will be
    // the length of vint32m2_t.
    MI.getOperand(FIOperandNum + 1).ChangeToRegister(VL, /*isDef=*/false);
  }
}

Register RISCVRegisterInfo::getFrameRegister(const MachineFunction &MF) const {
  const TargetFrameLowering *TFI = getFrameLowering(MF);
  return TFI->hasFP(MF) ? RISCV::X8 : RISCV::X2;
}

const uint32_t *
RISCVRegisterInfo::getCallPreservedMask(const MachineFunction &MF,
                                        CallingConv::ID CC) const {
  auto &Subtarget = MF.getSubtarget<RISCVSubtarget>();

  if (CC == CallingConv::GHC)
    return CSR_NoRegs_RegMask;
  switch (Subtarget.getTargetABI()) {
  default:
    llvm_unreachable("Unrecognized ABI");
  case RISCVABI::ABI_ILP32:
  case RISCVABI::ABI_LP64:
    return CSR_ILP32_LP64_RegMask;
  case RISCVABI::ABI_ILP32F:
  case RISCVABI::ABI_LP64F:
    return CSR_ILP32F_LP64F_RegMask;
  case RISCVABI::ABI_ILP32D:
  case RISCVABI::ABI_LP64D:
    return CSR_ILP32D_LP64D_RegMask;
  }
}

const TargetRegisterClass *
RISCVRegisterInfo::getLargestLegalSuperClass(const TargetRegisterClass *RC,
                                             const MachineFunction &) const {
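  // VMV0 contains only V0, which models the mask operand of masked
  // instructions; the full VR class is its largest legal super class.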
  if (RC == &RISCV::VMV0RegClass)
    return &RISCV::VRRegClass;
  return RC;
}
323