//===-- RISCVRegisterInfo.cpp - RISCV Register Information ------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file contains the RISCV implementation of the TargetRegisterInfo class.
//
//===----------------------------------------------------------------------===//

#include "RISCVRegisterInfo.h"
#include "RISCV.h"
#include "RISCVMachineFunctionInfo.h"
#include "RISCVSubtarget.h"
#include "llvm/BinaryFormat/Dwarf.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/RegisterScavenging.h"
#include "llvm/CodeGen/TargetFrameLowering.h"
#include "llvm/CodeGen/TargetInstrInfo.h"
#include "llvm/IR/DebugInfoMetadata.h"
#include "llvm/Support/ErrorHandling.h"

#define GET_REGINFO_TARGET_DESC
#include "RISCVGenRegisterInfo.inc"

using namespace llvm;

static_assert(RISCV::X1 == RISCV::X0 + 1, "Register list not consecutive");
static_assert(RISCV::X31 == RISCV::X0 + 31, "Register list not consecutive");
static_assert(RISCV::F1_H == RISCV::F0_H + 1, "Register list not consecutive");
static_assert(RISCV::F31_H == RISCV::F0_H + 31,
              "Register list not consecutive");
static_assert(RISCV::F1_F == RISCV::F0_F + 1, "Register list not consecutive");
static_assert(RISCV::F31_F == RISCV::F0_F + 31,
              "Register list not consecutive");
static_assert(RISCV::F1_D == RISCV::F0_D + 1, "Register list not consecutive");
static_assert(RISCV::F31_D == RISCV::F0_D + 31,
              "Register list not consecutive");
static_assert(RISCV::V1 == RISCV::V0 + 1, "Register list not consecutive");
static_assert(RISCV::V31 == RISCV::V0 + 31, "Register list not consecutive");

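// X1 (ra) is the return address register. RISC-V uses a single register
// numbering for debug info and exception handling (Dwarf/EH flavour 0), and
// no dedicated PC register is specified.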
RISCVRegisterInfo::RISCVRegisterInfo(unsigned HwMode)
    : RISCVGenRegisterInfo(RISCV::X1, /*DwarfFlavour*/0, /*EHFlavor*/0,
                           /*PC*/0, HwMode) {}

const MCPhysReg *
RISCVRegisterInfo::getCalleeSavedRegs(const MachineFunction *MF) const {
  auto &Subtarget = MF->getSubtarget<RISCVSubtarget>();
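  // The GHC calling convention preserves no registers across calls, so the
  // callee-saved list is empty.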
  if (MF->getFunction().getCallingConv() == CallingConv::GHC)
    return CSR_NoRegs_SaveList;
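  // Interrupt handlers cannot assume the interrupted code saved anything, so
  // their save lists also cover registers that are normally caller-saved,
  // including floating-point registers when the F or D extension is enabled.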
  if (MF->getFunction().hasFnAttribute("interrupt")) {
    if (Subtarget.hasStdExtD())
      return CSR_XLEN_F64_Interrupt_SaveList;
    if (Subtarget.hasStdExtF())
      return CSR_XLEN_F32_Interrupt_SaveList;
    return CSR_Interrupt_SaveList;
  }

  switch (Subtarget.getTargetABI()) {
  default:
    llvm_unreachable("Unrecognized ABI");
  case RISCVABI::ABI_ILP32:
  case RISCVABI::ABI_LP64:
    return CSR_ILP32_LP64_SaveList;
  case RISCVABI::ABI_ILP32F:
  case RISCVABI::ABI_LP64F:
    return CSR_ILP32F_LP64F_SaveList;
  case RISCVABI::ABI_ILP32D:
  case RISCVABI::ABI_LP64D:
    return CSR_ILP32D_LP64D_SaveList;
  }
}

BitVector RISCVRegisterInfo::getReservedRegs(const MachineFunction &MF) const {
  const RISCVFrameLowering *TFI = getFrameLowering(MF);
  BitVector Reserved(getNumRegs());

  // Mark any registers requested to be reserved as such
  for (size_t Reg = 0; Reg < getNumRegs(); Reg++) {
    if (MF.getSubtarget<RISCVSubtarget>().isRegisterReservedByUser(Reg))
      markSuperRegs(Reserved, Reg);
  }

  // Use markSuperRegs to ensure any register aliases are also reserved
  markSuperRegs(Reserved, RISCV::X0); // zero
  markSuperRegs(Reserved, RISCV::X2); // sp
  markSuperRegs(Reserved, RISCV::X3); // gp
  markSuperRegs(Reserved, RISCV::X4); // tp
  if (TFI->hasFP(MF))
    markSuperRegs(Reserved, RISCV::X8); // fp
  // Reserve the base register if we need to realign the stack and allocate
  // variable-sized objects at runtime.
  if (TFI->hasBP(MF))
    markSuperRegs(Reserved, RISCVABI::getBPReg()); // bp

  // Vector state registers (VL, VTYPE, etc.) are reserved; code generation
  // manages them explicitly rather than through register allocation.
  markSuperRegs(Reserved, RISCV::VL);
  markSuperRegs(Reserved, RISCV::VTYPE);
  markSuperRegs(Reserved, RISCV::VXSAT);
  markSuperRegs(Reserved, RISCV::VXRM);
  markSuperRegs(Reserved, RISCV::VLENB); // vlenb (constant)

  // Floating point environment registers.
  markSuperRegs(Reserved, RISCV::FRM);
  markSuperRegs(Reserved, RISCV::FFLAGS);

  assert(checkAllSuperRegsMarked(Reserved));
  return Reserved;
}

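// Registers reserved by the user (for example via -ffixed-<reg>) must not be
// clobbered by inline assembly.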
bool RISCVRegisterInfo::isAsmClobberable(const MachineFunction &MF,
                                         MCRegister PhysReg) const {
  return !MF.getSubtarget<RISCVSubtarget>().isRegisterReservedByUser(PhysReg);
}

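// X0 is hard-wired to zero and VLENB is fixed for a given hart, so reads of
// either register always yield the same value.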
bool RISCVRegisterInfo::isConstantPhysReg(MCRegister PhysReg) const {
  return PhysReg == RISCV::X0 || PhysReg == RISCV::VLENB;
}

const uint32_t *RISCVRegisterInfo::getNoPreservedMask() const {
  return CSR_NoRegs_RegMask;
}

// Frame indexes representing locations of CSRs which are given a fixed location
// by save/restore libcalls.
static const std::pair<unsigned, int> FixedCSRFIMap[] = {
  {/*ra*/  RISCV::X1,   -1},
  {/*s0*/  RISCV::X8,   -2},
  {/*s1*/  RISCV::X9,   -3},
  {/*s2*/  RISCV::X18,  -4},
  {/*s3*/  RISCV::X19,  -5},
  {/*s4*/  RISCV::X20,  -6},
  {/*s5*/  RISCV::X21,  -7},
  {/*s6*/  RISCV::X22,  -8},
  {/*s7*/  RISCV::X23,  -9},
  {/*s8*/  RISCV::X24,  -10},
  {/*s9*/  RISCV::X25,  -11},
  {/*s10*/ RISCV::X26,  -12},
  {/*s11*/ RISCV::X27,  -13}
};

bool RISCVRegisterInfo::hasReservedSpillSlot(const MachineFunction &MF,
                                             Register Reg,
                                             int &FrameIdx) const {
  const auto *RVFI = MF.getInfo<RISCVMachineFunctionInfo>();
  if (!RVFI->useSaveRestoreLibCalls(MF))
    return false;

  const auto *FII =
      llvm::find_if(FixedCSRFIMap, [&](auto P) { return P.first == Reg; });
  if (FII == std::end(FixedCSRFIMap))
    return false;

  FrameIdx = FII->second;
  return true;
}

void RISCVRegisterInfo::eliminateFrameIndex(MachineBasicBlock::iterator II,
                                            int SPAdj, unsigned FIOperandNum,
                                            RegScavenger *RS) const {
  assert(SPAdj == 0 && "Unexpected non-zero SPAdj value");

  MachineInstr &MI = *II;
  MachineFunction &MF = *MI.getParent()->getParent();
  MachineRegisterInfo &MRI = MF.getRegInfo();
  const RISCVInstrInfo *TII = MF.getSubtarget<RISCVSubtarget>().getInstrInfo();
  DebugLoc DL = MI.getDebugLoc();

  int FrameIndex = MI.getOperand(FIOperandNum).getIndex();
  Register FrameReg;
  StackOffset Offset =
      getFrameLowering(MF)->getFrameIndexReference(MF, FrameIndex, FrameReg);
  bool IsRVVSpill = RISCV::isRVVSpill(MI);
  if (!IsRVVSpill)
    Offset += StackOffset::getFixed(MI.getOperand(FIOperandNum + 1).getImm());

  if (!isInt<32>(Offset.getFixed())) {
    report_fatal_error(
        "Frame offsets outside of the signed 32-bit range not supported");
  }

  MachineBasicBlock &MBB = *MI.getParent();
  bool FrameRegIsKill = false;

  // If required, pre-compute the scalable factor amount which will be used in
  // later offset computation. Since this sequence requires up to two scratch
  // registers -- after which one is made free -- this grants us better
  // scavenging of scratch registers as only up to two are live at one time,
  // rather than three.
  Register ScalableFactorRegister;
  unsigned ScalableAdjOpc = RISCV::ADD;
  if (Offset.getScalable()) {
    int64_t ScalableValue = Offset.getScalable();
    if (ScalableValue < 0) {
      ScalableValue = -ScalableValue;
      ScalableAdjOpc = RISCV::SUB;
    }
    // 1. Read vlenb and multiply it by the number of vector registers.
    ScalableFactorRegister =
        TII->getVLENFactoredAmount(MF, MBB, II, DL, ScalableValue);
  }

  if (!isInt<12>(Offset.getFixed())) {
    // The offset won't fit in an immediate, so use a scratch register
    // instead, modifying Offset and FrameReg appropriately.
    Register ScratchReg = MRI.createVirtualRegister(&RISCV::GPRRegClass);
    TII->movImm(MBB, II, DL, ScratchReg, Offset.getFixed());
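    // If the instruction is a plain ADDI computing the frame address and has
    // no scalable component, form the final address directly into the ADDI's
    // destination and erase the original instruction.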
    if (MI.getOpcode() == RISCV::ADDI && !Offset.getScalable()) {
      BuildMI(MBB, II, DL, TII->get(RISCV::ADD), MI.getOperand(0).getReg())
        .addReg(FrameReg)
        .addReg(ScratchReg, RegState::Kill);
      MI.eraseFromParent();
      return;
    }
    BuildMI(MBB, II, DL, TII->get(RISCV::ADD), ScratchReg)
        .addReg(FrameReg)
        .addReg(ScratchReg, RegState::Kill);
    Offset = StackOffset::get(0, Offset.getScalable());
    FrameReg = ScratchReg;
    FrameRegIsKill = true;
  }

  if (!Offset.getScalable()) {
    // Offset = (fixed offset, 0)
    MI.getOperand(FIOperandNum)
        .ChangeToRegister(FrameReg, false, false, FrameRegIsKill);
    if (!IsRVVSpill)
      MI.getOperand(FIOperandNum + 1).ChangeToImmediate(Offset.getFixed());
    else {
      if (Offset.getFixed()) {
        Register ScratchReg = MRI.createVirtualRegister(&RISCV::GPRRegClass);
        BuildMI(MBB, II, DL, TII->get(RISCV::ADDI), ScratchReg)
          .addReg(FrameReg, getKillRegState(FrameRegIsKill))
          .addImm(Offset.getFixed());
        MI.getOperand(FIOperandNum)
          .ChangeToRegister(ScratchReg, false, false, true);
      }
    }
  } else {
    // Offset = (fixed offset, scalable offset)
    // Step 1, the scalable offset, has already been computed.
    assert(ScalableFactorRegister &&
           "Expected pre-computation of scalable factor in earlier step");

    // 2. Calculate address: FrameReg + result of multiply
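    // As above, an ADDI with no remaining fixed offset can have the ADD/SUB
    // write its destination directly, allowing the ADDI to be erased.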
    if (MI.getOpcode() == RISCV::ADDI && !Offset.getFixed()) {
      BuildMI(MBB, II, DL, TII->get(ScalableAdjOpc), MI.getOperand(0).getReg())
          .addReg(FrameReg, getKillRegState(FrameRegIsKill))
          .addReg(ScalableFactorRegister, RegState::Kill);
      MI.eraseFromParent();
      return;
    }
    Register VL = MRI.createVirtualRegister(&RISCV::GPRRegClass);
    BuildMI(MBB, II, DL, TII->get(ScalableAdjOpc), VL)
        .addReg(FrameReg, getKillRegState(FrameRegIsKill))
        .addReg(ScalableFactorRegister, RegState::Kill);

    if (IsRVVSpill && Offset.getFixed()) {
      // Scalable loads and stores have no immediate offset operand, so the
      // fixed part must be folded into the load/store base address.
      BuildMI(MBB, II, DL, TII->get(RISCV::ADDI), VL)
          .addReg(VL)
          .addImm(Offset.getFixed());
    }

    // 3. Replace address register with calculated address register
    MI.getOperand(FIOperandNum).ChangeToRegister(VL, false, false, true);
    if (!IsRVVSpill)
      MI.getOperand(FIOperandNum + 1).ChangeToImmediate(Offset.getFixed());
  }

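  // Zvlsseg spill/reload pseudos take the length in bytes of one element of
  // the segment tuple as their last operand; compute it here as VLENB scaled
  // by a power of two.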
  auto ZvlssegInfo = RISCV::isRVVSpillForZvlsseg(MI.getOpcode());
  if (ZvlssegInfo) {
    Register VL = MRI.createVirtualRegister(&RISCV::GPRRegClass);
    BuildMI(MBB, II, DL, TII->get(RISCV::PseudoReadVLENB), VL);
    uint32_t ShiftAmount = Log2_32(ZvlssegInfo->second);
    if (ShiftAmount != 0)
      BuildMI(MBB, II, DL, TII->get(RISCV::SLLI), VL)
          .addReg(VL)
          .addImm(ShiftAmount);
    // The last argument of the pseudo spill/reload opcodes for zvlsseg is the
    // length of one element of the zvlsseg type. For example, for
    // vint32m2x2_t, it is the length of vint32m2_t.
    MI.getOperand(FIOperandNum + 1).ChangeToRegister(VL, /*isDef=*/false);
  }
}

Register RISCVRegisterInfo::getFrameRegister(const MachineFunction &MF) const {
  const TargetFrameLowering *TFI = getFrameLowering(MF);
  return TFI->hasFP(MF) ? RISCV::X8 : RISCV::X2;
}

const uint32_t *
RISCVRegisterInfo::getCallPreservedMask(const MachineFunction &MF,
                                        CallingConv::ID CC) const {
  auto &Subtarget = MF.getSubtarget<RISCVSubtarget>();

  if (CC == CallingConv::GHC)
    return CSR_NoRegs_RegMask;
  switch (Subtarget.getTargetABI()) {
  default:
    llvm_unreachable("Unrecognized ABI");
  case RISCVABI::ABI_ILP32:
  case RISCVABI::ABI_LP64:
    return CSR_ILP32_LP64_RegMask;
  case RISCVABI::ABI_ILP32F:
  case RISCVABI::ABI_LP64F:
    return CSR_ILP32F_LP64F_RegMask;
  case RISCVABI::ABI_ILP32D:
  case RISCVABI::ABI_LP64D:
    return CSR_ILP32D_LP64D_RegMask;
  }
}

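// VMV0 contains only V0 (the mask register); inflating it to the full VR
// class gives the register allocator more freedom.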
const TargetRegisterClass *
RISCVRegisterInfo::getLargestLegalSuperClass(const TargetRegisterClass *RC,
                                             const MachineFunction &) const {
  if (RC == &RISCV::VMV0RegClass)
    return &RISCV::VRRegClass;
  return RC;
}

void RISCVRegisterInfo::getOffsetOpcodes(const StackOffset &Offset,
                                         SmallVectorImpl<uint64_t> &Ops) const {
  // VLENB is the length of a vector register in bytes. We use <vscale x 8 x i8>
  // to represent one vector register. The DWARF offset is
  // VLENB * scalable_offset / 8.
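  // For example, a scalable offset of 16 bytes (two vector registers) is
  // emitted as: DW_OP_constu 2, DW_OP_bregx vlenb 0, DW_OP_mul, DW_OP_plus.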
  assert(Offset.getScalable() % 8 == 0 && "Invalid frame offset");

  // Add fixed-sized offset using existing DIExpression interface.
  DIExpression::appendOffset(Ops, Offset.getFixed());

  unsigned VLENB = getDwarfRegNum(RISCV::VLENB, true);
  int64_t VLENBSized = Offset.getScalable() / 8;
  if (VLENBSized > 0) {
    Ops.push_back(dwarf::DW_OP_constu);
    Ops.push_back(VLENBSized);
    Ops.append({dwarf::DW_OP_bregx, VLENB, 0ULL});
    Ops.push_back(dwarf::DW_OP_mul);
    Ops.push_back(dwarf::DW_OP_plus);
  } else if (VLENBSized < 0) {
    Ops.push_back(dwarf::DW_OP_constu);
    Ops.push_back(-VLENBSized);
    Ops.append({dwarf::DW_OP_bregx, VLENB, 0ULL});
    Ops.push_back(dwarf::DW_OP_mul);
    Ops.push_back(dwarf::DW_OP_minus);
  }
}

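// With the compressed (C) extension, index 1 selects an alternate register
// cost table intended to steer allocation towards registers that compressed
// instructions can encode.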
unsigned
RISCVRegisterInfo::getRegisterCostTableIndex(const MachineFunction &MF) const {
  return MF.getSubtarget<RISCVSubtarget>().hasStdExtC() ? 1 : 0;
}