1 //===-- RISCVRegisterInfo.cpp - RISCV Register Information ------*- C++ -*-===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This file contains the RISCV implementation of the TargetRegisterInfo class.
10 //
11 //===----------------------------------------------------------------------===//
12 
#include "RISCVRegisterInfo.h"
#include "RISCV.h"
#include "RISCVMachineFunctionInfo.h"
#include "RISCVSubtarget.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/RegisterScavenging.h"
#include "llvm/CodeGen/TargetFrameLowering.h"
#include "llvm/CodeGen/TargetInstrInfo.h"
#include "llvm/IR/DebugInfoMetadata.h"
#include "llvm/Support/ErrorHandling.h"
#include <utility>
25 
26 #define GET_REGINFO_TARGET_DESC
27 #include "RISCVGenRegisterInfo.inc"
28 
29 using namespace llvm;
30 
// Code in this backend indexes into these register files arithmetically
// (e.g. RISCV::X0 + N), which is only valid if TableGen assigned each file a
// contiguous run of register numbers. Verify that at compile time by checking
// the first and last member of each run.
static_assert(RISCV::X1 == RISCV::X0 + 1, "Register list not consecutive");
static_assert(RISCV::X31 == RISCV::X0 + 31, "Register list not consecutive");
static_assert(RISCV::F1_H == RISCV::F0_H + 1, "Register list not consecutive");
static_assert(RISCV::F31_H == RISCV::F0_H + 31,
              "Register list not consecutive");
static_assert(RISCV::F1_F == RISCV::F0_F + 1, "Register list not consecutive");
static_assert(RISCV::F31_F == RISCV::F0_F + 31,
              "Register list not consecutive");
static_assert(RISCV::F1_D == RISCV::F0_D + 1, "Register list not consecutive");
static_assert(RISCV::F31_D == RISCV::F0_D + 31,
              "Register list not consecutive");
static_assert(RISCV::V1 == RISCV::V0 + 1, "Register list not consecutive");
static_assert(RISCV::V31 == RISCV::V0 + 31, "Register list not consecutive");
44 
// x1 (ra) is reported to the generic register-info machinery as the return
// address register; the DWARF/EH flavours and the PC register number use
// their default value of 0. HwMode selects between the RV32/RV64 register
// descriptions generated by TableGen.
RISCVRegisterInfo::RISCVRegisterInfo(unsigned HwMode)
    : RISCVGenRegisterInfo(RISCV::X1, /*DwarfFlavour*/0, /*EHFlavor*/0,
                           /*PC*/0, HwMode) {}
48 
49 const MCPhysReg *
50 RISCVRegisterInfo::getCalleeSavedRegs(const MachineFunction *MF) const {
51   auto &Subtarget = MF->getSubtarget<RISCVSubtarget>();
52   if (MF->getFunction().getCallingConv() == CallingConv::GHC)
53     return CSR_NoRegs_SaveList;
54   if (MF->getFunction().hasFnAttribute("interrupt")) {
55     if (Subtarget.hasStdExtD())
56       return CSR_XLEN_F64_Interrupt_SaveList;
57     if (Subtarget.hasStdExtF())
58       return CSR_XLEN_F32_Interrupt_SaveList;
59     return CSR_Interrupt_SaveList;
60   }
61 
62   switch (Subtarget.getTargetABI()) {
63   default:
64     llvm_unreachable("Unrecognized ABI");
65   case RISCVABI::ABI_ILP32:
66   case RISCVABI::ABI_LP64:
67     return CSR_ILP32_LP64_SaveList;
68   case RISCVABI::ABI_ILP32F:
69   case RISCVABI::ABI_LP64F:
70     return CSR_ILP32F_LP64F_SaveList;
71   case RISCVABI::ABI_ILP32D:
72   case RISCVABI::ABI_LP64D:
73     return CSR_ILP32D_LP64D_SaveList;
74   }
75 }
76 
77 BitVector RISCVRegisterInfo::getReservedRegs(const MachineFunction &MF) const {
78   const RISCVFrameLowering *TFI = getFrameLowering(MF);
79   BitVector Reserved(getNumRegs());
80 
81   // Mark any registers requested to be reserved as such
82   for (size_t Reg = 0; Reg < getNumRegs(); Reg++) {
83     if (MF.getSubtarget<RISCVSubtarget>().isRegisterReservedByUser(Reg))
84       markSuperRegs(Reserved, Reg);
85   }
86 
87   // Use markSuperRegs to ensure any register aliases are also reserved
88   markSuperRegs(Reserved, RISCV::X0); // zero
89   markSuperRegs(Reserved, RISCV::X2); // sp
90   markSuperRegs(Reserved, RISCV::X3); // gp
91   markSuperRegs(Reserved, RISCV::X4); // tp
92   if (TFI->hasFP(MF))
93     markSuperRegs(Reserved, RISCV::X8); // fp
94   // Reserve the base register if we need to realign the stack and allocate
95   // variable-sized objects at runtime.
96   if (TFI->hasBP(MF))
97     markSuperRegs(Reserved, RISCVABI::getBPReg()); // bp
98 
99   // V registers for code generation. We handle them manually.
100   markSuperRegs(Reserved, RISCV::VL);
101   markSuperRegs(Reserved, RISCV::VTYPE);
102   markSuperRegs(Reserved, RISCV::VXSAT);
103   markSuperRegs(Reserved, RISCV::VXRM);
104 
105   // Floating point environment registers.
106   markSuperRegs(Reserved, RISCV::FRM);
107   markSuperRegs(Reserved, RISCV::FFLAGS);
108 
109   assert(checkAllSuperRegsMarked(Reserved));
110   return Reserved;
111 }
112 
113 bool RISCVRegisterInfo::isAsmClobberable(const MachineFunction &MF,
114                                          MCRegister PhysReg) const {
115   return !MF.getSubtarget<RISCVSubtarget>().isRegisterReservedByUser(PhysReg);
116 }
117 
// x0 is hard-wired to zero, so it is the only physical register whose value
// is a constant.
bool RISCVRegisterInfo::isConstantPhysReg(MCRegister PhysReg) const {
  return PhysReg == RISCV::X0;
}
121 
// Return the TableGen-generated mask in which no registers are preserved
// across a call.
const uint32_t *RISCVRegisterInfo::getNoPreservedMask() const {
  return CSR_NoRegs_RegMask;
}
125 
126 // Frame indexes representing locations of CSRs which are given a fixed location
127 // by save/restore libcalls.
128 static const std::map<unsigned, int> FixedCSRFIMap = {
129   {/*ra*/  RISCV::X1,   -1},
130   {/*s0*/  RISCV::X8,   -2},
131   {/*s1*/  RISCV::X9,   -3},
132   {/*s2*/  RISCV::X18,  -4},
133   {/*s3*/  RISCV::X19,  -5},
134   {/*s4*/  RISCV::X20,  -6},
135   {/*s5*/  RISCV::X21,  -7},
136   {/*s6*/  RISCV::X22,  -8},
137   {/*s7*/  RISCV::X23,  -9},
138   {/*s8*/  RISCV::X24,  -10},
139   {/*s9*/  RISCV::X25,  -11},
140   {/*s10*/ RISCV::X26,  -12},
141   {/*s11*/ RISCV::X27,  -13}
142 };
143 
144 bool RISCVRegisterInfo::hasReservedSpillSlot(const MachineFunction &MF,
145                                              Register Reg,
146                                              int &FrameIdx) const {
147   const auto *RVFI = MF.getInfo<RISCVMachineFunctionInfo>();
148   if (!RVFI->useSaveRestoreLibCalls(MF))
149     return false;
150 
151   auto FII = FixedCSRFIMap.find(Reg);
152   if (FII == FixedCSRFIMap.end())
153     return false;
154 
155   FrameIdx = FII->second;
156   return true;
157 }
158 
// Rewrite the frame-index operand of MI (at FIOperandNum) into a concrete
// base register plus offset. The offset may have both a fixed part and a
// scalable (vector-length-dependent) part; either may require materializing
// intermediate values in scratch virtual registers. RVV spill/fill pseudos
// take no immediate operand, so for them any fixed offset must be folded
// into the address register instead.
void RISCVRegisterInfo::eliminateFrameIndex(MachineBasicBlock::iterator II,
                                            int SPAdj, unsigned FIOperandNum,
                                            RegScavenger *RS) const {
  assert(SPAdj == 0 && "Unexpected non-zero SPAdj value");

  MachineInstr &MI = *II;
  MachineFunction &MF = *MI.getParent()->getParent();
  MachineRegisterInfo &MRI = MF.getRegInfo();
  const RISCVInstrInfo *TII = MF.getSubtarget<RISCVSubtarget>().getInstrInfo();
  DebugLoc DL = MI.getDebugLoc();

  // Resolve the frame index to a (base register, offset) pair chosen by the
  // frame lowering (sp-, fp-, or bp-relative).
  int FrameIndex = MI.getOperand(FIOperandNum).getIndex();
  Register FrameReg;
  StackOffset Offset =
      getFrameLowering(MF)->getFrameIndexReference(MF, FrameIndex, FrameReg);
  bool IsRVVSpill = TII->isRVVSpill(MI, /*CheckFIs*/ false);
  // Non-RVV memory instructions carry an immediate offset operand right after
  // the frame index; fold it into the total offset. RVV spills have no such
  // immediate.
  if (!IsRVVSpill)
    Offset += StackOffset::getFixed(MI.getOperand(FIOperandNum + 1).getImm());

  if (!isInt<32>(Offset.getFixed())) {
    report_fatal_error(
        "Frame offsets outside of the signed 32-bit range not supported");
  }

  MachineBasicBlock &MBB = *MI.getParent();
  bool FrameRegIsKill = false;

  // If required, pre-compute the scalable factor amount which will be used in
  // later offset computation. Since this sequence requires up to two scratch
  // registers -- after which one is made free -- this grants us better
  // scavenging of scratch registers as only up to two are live at one time,
  // rather than three.
  Register ScalableFactorRegister;
  unsigned ScalableAdjOpc = RISCV::ADD;
  if (Offset.getScalable()) {
    int64_t ScalableValue = Offset.getScalable();
    // Negative scalable offsets are handled by computing the positive factor
    // and subtracting it from the base instead of adding.
    if (ScalableValue < 0) {
      ScalableValue = -ScalableValue;
      ScalableAdjOpc = RISCV::SUB;
    }
    // 1. Get vlenb && multiply vlen with the number of vector registers.
    ScalableFactorRegister =
        TII->getVLENFactoredAmount(MF, MBB, II, DL, ScalableValue);
  }

  if (!isInt<12>(Offset.getFixed())) {
    // The offset won't fit in an immediate, so use a scratch register instead
    // Modify Offset and FrameReg appropriately
    Register ScratchReg = MRI.createVirtualRegister(&RISCV::GPRRegClass);
    TII->movImm(MBB, II, DL, ScratchReg, Offset.getFixed());
    // An ADDI with a purely fixed offset can be replaced outright by
    // FrameReg + ScratchReg; no further rewriting needed.
    if (MI.getOpcode() == RISCV::ADDI && !Offset.getScalable()) {
      BuildMI(MBB, II, DL, TII->get(RISCV::ADD), MI.getOperand(0).getReg())
        .addReg(FrameReg)
        .addReg(ScratchReg, RegState::Kill);
      MI.eraseFromParent();
      return;
    }
    // Otherwise fold the fixed part into ScratchReg and continue with a
    // zero fixed offset from the new base.
    BuildMI(MBB, II, DL, TII->get(RISCV::ADD), ScratchReg)
        .addReg(FrameReg)
        .addReg(ScratchReg, RegState::Kill);
    Offset = StackOffset::get(0, Offset.getScalable());
    FrameReg = ScratchReg;
    FrameRegIsKill = true;
  }

  if (!Offset.getScalable()) {
    // Offset = (fixed offset, 0)
    MI.getOperand(FIOperandNum)
        .ChangeToRegister(FrameReg, false, false, FrameRegIsKill);
    if (!IsRVVSpill)
      MI.getOperand(FIOperandNum + 1).ChangeToImmediate(Offset.getFixed());
    else {
      // RVV spills take no immediate; a remaining fixed offset (guaranteed
      // simm12 at this point) must be added into the address register.
      if (Offset.getFixed()) {
        Register ScratchReg = MRI.createVirtualRegister(&RISCV::GPRRegClass);
        BuildMI(MBB, II, DL, TII->get(RISCV::ADDI), ScratchReg)
          .addReg(FrameReg, getKillRegState(FrameRegIsKill))
          .addImm(Offset.getFixed());
        MI.getOperand(FIOperandNum)
          .ChangeToRegister(ScratchReg, false, false, true);
      }
    }
  } else {
    // Offset = (fixed offset, scalable offset)
    // Step 1, the scalable offset, has already been computed.
    assert(ScalableFactorRegister &&
           "Expected pre-computation of scalable factor in earlier step");

    // 2. Calculate address: FrameReg + result of multiply
    // As above, a pure ADDI can be replaced directly by the add/sub of the
    // scalable factor when there is no fixed component.
    if (MI.getOpcode() == RISCV::ADDI && !Offset.getFixed()) {
      BuildMI(MBB, II, DL, TII->get(ScalableAdjOpc), MI.getOperand(0).getReg())
          .addReg(FrameReg, getKillRegState(FrameRegIsKill))
          .addReg(ScalableFactorRegister, RegState::Kill);
      MI.eraseFromParent();
      return;
    }
    Register VL = MRI.createVirtualRegister(&RISCV::GPRRegClass);
    BuildMI(MBB, II, DL, TII->get(ScalableAdjOpc), VL)
        .addReg(FrameReg, getKillRegState(FrameRegIsKill))
        .addReg(ScalableFactorRegister, RegState::Kill);

    if (IsRVVSpill && Offset.getFixed()) {
      // Scalable load/store has no immediate argument. We need to add the
      // fixed part into the load/store base address.
      BuildMI(MBB, II, DL, TII->get(RISCV::ADDI), VL)
          .addReg(VL)
          .addImm(Offset.getFixed());
    }

    // 3. Replace address register with calculated address register
    MI.getOperand(FIOperandNum).ChangeToRegister(VL, false, false, true);
    if (!IsRVVSpill)
      MI.getOperand(FIOperandNum + 1).ChangeToImmediate(Offset.getFixed());
  }

  // Zvlsseg (segment) spill pseudos additionally need VLENB (scaled by the
  // per-segment register count) as an explicit operand.
  auto ZvlssegInfo = TII->isRVVSpillForZvlsseg(MI.getOpcode());
  if (ZvlssegInfo) {
    Register VL = MRI.createVirtualRegister(&RISCV::GPRRegClass);
    BuildMI(MBB, II, DL, TII->get(RISCV::PseudoReadVLENB), VL);
    uint32_t ShiftAmount = Log2_32(ZvlssegInfo->second);
    if (ShiftAmount != 0)
      BuildMI(MBB, II, DL, TII->get(RISCV::SLLI), VL)
          .addReg(VL)
          .addImm(ShiftAmount);
    // The last argument of pseudo spilling opcode for zvlsseg is the length of
    // one element of zvlsseg types. For example, for vint32m2x2_t, it will be
    // the length of vint32m2_t.
    MI.getOperand(FIOperandNum + 1).ChangeToRegister(VL, /*isDef=*/false);
  }
}
288 
289 Register RISCVRegisterInfo::getFrameRegister(const MachineFunction &MF) const {
290   const TargetFrameLowering *TFI = getFrameLowering(MF);
291   return TFI->hasFP(MF) ? RISCV::X8 : RISCV::X2;
292 }
293 
294 const uint32_t *
295 RISCVRegisterInfo::getCallPreservedMask(const MachineFunction & MF,
296                                         CallingConv::ID CC) const {
297   auto &Subtarget = MF.getSubtarget<RISCVSubtarget>();
298 
299   if (CC == CallingConv::GHC)
300     return CSR_NoRegs_RegMask;
301   switch (Subtarget.getTargetABI()) {
302   default:
303     llvm_unreachable("Unrecognized ABI");
304   case RISCVABI::ABI_ILP32:
305   case RISCVABI::ABI_LP64:
306     return CSR_ILP32_LP64_RegMask;
307   case RISCVABI::ABI_ILP32F:
308   case RISCVABI::ABI_LP64F:
309     return CSR_ILP32F_LP64F_RegMask;
310   case RISCVABI::ABI_ILP32D:
311   case RISCVABI::ABI_LP64D:
312     return CSR_ILP32D_LP64D_RegMask;
313   }
314 }
315 
316 const TargetRegisterClass *
317 RISCVRegisterInfo::getLargestLegalSuperClass(const TargetRegisterClass *RC,
318                                              const MachineFunction &) const {
319   if (RC == &RISCV::VMV0RegClass)
320     return &RISCV::VRRegClass;
321   return RC;
322 }
323 
324 void RISCVRegisterInfo::getOffsetOpcodes(const StackOffset &Offset,
325                                          SmallVectorImpl<uint64_t> &Ops) const {
326   // VLENB is the length of a vector register in bytes. We use <vscale x 8 x i8>
327   // to represent one vector register. The dwarf offset is
328   // VLENB * scalable_offset / 8.
329   assert(Offset.getScalable() % 8 == 0 && "Invalid frame offset");
330 
331   // Add fixed-sized offset using existing DIExpression interface.
332   DIExpression::appendOffset(Ops, Offset.getFixed());
333 
334   unsigned VLENB = getDwarfRegNum(RISCV::VLENB, true);
335   int64_t VLENBSized = Offset.getScalable() / 8;
336   if (VLENBSized > 0) {
337     Ops.push_back(dwarf::DW_OP_constu);
338     Ops.push_back(VLENBSized);
339     Ops.append({dwarf::DW_OP_bregx, VLENB, 0ULL});
340     Ops.push_back(dwarf::DW_OP_mul);
341     Ops.push_back(dwarf::DW_OP_plus);
342   } else if (VLENBSized < 0) {
343     Ops.push_back(dwarf::DW_OP_constu);
344     Ops.push_back(-VLENBSized);
345     Ops.append({dwarf::DW_OP_bregx, VLENB, 0ULL});
346     Ops.push_back(dwarf::DW_OP_mul);
347     Ops.push_back(dwarf::DW_OP_minus);
348   }
349 }
350 
351 unsigned
352 RISCVRegisterInfo::getRegisterCostTableIndex(const MachineFunction &MF) const {
353   return MF.getSubtarget<RISCVSubtarget>().hasStdExtC() ? 1 : 0;
354 }
355