//===- X86RegisterBankInfo.cpp -----------------------------------*- C++ -*-==//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
/// \file
/// This file implements the targeting of the RegisterBankInfo class for X86.
/// \todo This should be generated by TableGen.
//===----------------------------------------------------------------------===//

#include "X86RegisterBankInfo.h"
#include "X86InstrInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/RegisterBank.h"
#include "llvm/CodeGen/RegisterBankInfo.h"
#include "llvm/CodeGen/TargetRegisterInfo.h"

#define GET_TARGET_REGBANK_IMPL
#include "X86GenRegisterBank.inc"

using namespace llvm;
// This file will be TableGen'ed at some point.
#define GET_TARGET_REGBANK_INFO_IMPL
#include "X86GenRegisterBankInfo.def"

X86RegisterBankInfo(const TargetRegisterInfo & TRI)28 X86RegisterBankInfo::X86RegisterBankInfo(const TargetRegisterInfo &TRI) {
29
30 // validate RegBank initialization.
31 const RegisterBank &RBGPR = getRegBank(X86::GPRRegBankID);
32 (void)RBGPR;
33 assert(&X86::GPRRegBank == &RBGPR && "Incorrect RegBanks inizalization.");
34
35 // The GPR register bank is fully defined by all the registers in
36 // GR64 + its subclasses.
37 assert(RBGPR.covers(*TRI.getRegClass(X86::GR64RegClassID)) &&
38 "Subclass not added?");
39 assert(RBGPR.getSize() == 64 && "GPRs should hold up to 64-bit");
40 }
41
42 const RegisterBank &
getRegBankFromRegClass(const TargetRegisterClass & RC,LLT) const43 X86RegisterBankInfo::getRegBankFromRegClass(const TargetRegisterClass &RC,
44 LLT) const {
45
46 if (X86::GR8RegClass.hasSubClassEq(&RC) ||
47 X86::GR16RegClass.hasSubClassEq(&RC) ||
48 X86::GR32RegClass.hasSubClassEq(&RC) ||
49 X86::GR64RegClass.hasSubClassEq(&RC) ||
50 X86::LOW32_ADDR_ACCESSRegClass.hasSubClassEq(&RC) ||
51 X86::LOW32_ADDR_ACCESS_RBPRegClass.hasSubClassEq(&RC))
52 return getRegBank(X86::GPRRegBankID);
53
54 if (X86::FR32XRegClass.hasSubClassEq(&RC) ||
55 X86::FR64XRegClass.hasSubClassEq(&RC) ||
56 X86::VR128XRegClass.hasSubClassEq(&RC) ||
57 X86::VR256XRegClass.hasSubClassEq(&RC) ||
58 X86::VR512RegClass.hasSubClassEq(&RC))
59 return getRegBank(X86::VECRRegBankID);
60
61 llvm_unreachable("Unsupported register kind yet.");
62 }
63
64 X86GenRegisterBankInfo::PartialMappingIdx
getPartialMappingIdx(const LLT & Ty,bool isFP)65 X86GenRegisterBankInfo::getPartialMappingIdx(const LLT &Ty, bool isFP) {
66 if ((Ty.isScalar() && !isFP) || Ty.isPointer()) {
67 switch (Ty.getSizeInBits()) {
68 case 1:
69 case 8:
70 return PMI_GPR8;
71 case 16:
72 return PMI_GPR16;
73 case 32:
74 return PMI_GPR32;
75 case 64:
76 return PMI_GPR64;
77 case 128:
78 return PMI_VEC128;
79 break;
80 default:
81 llvm_unreachable("Unsupported register size.");
82 }
83 } else if (Ty.isScalar()) {
84 switch (Ty.getSizeInBits()) {
85 case 32:
86 return PMI_FP32;
87 case 64:
88 return PMI_FP64;
89 case 128:
90 return PMI_VEC128;
91 default:
92 llvm_unreachable("Unsupported register size.");
93 }
94 } else {
95 switch (Ty.getSizeInBits()) {
96 case 128:
97 return PMI_VEC128;
98 case 256:
99 return PMI_VEC256;
100 case 512:
101 return PMI_VEC512;
102 default:
103 llvm_unreachable("Unsupported register size.");
104 }
105 }
106
107 return PMI_None;
108 }
109
getInstrPartialMappingIdxs(const MachineInstr & MI,const MachineRegisterInfo & MRI,const bool isFP,SmallVectorImpl<PartialMappingIdx> & OpRegBankIdx)110 void X86RegisterBankInfo::getInstrPartialMappingIdxs(
111 const MachineInstr &MI, const MachineRegisterInfo &MRI, const bool isFP,
112 SmallVectorImpl<PartialMappingIdx> &OpRegBankIdx) {
113
114 unsigned NumOperands = MI.getNumOperands();
115 for (unsigned Idx = 0; Idx < NumOperands; ++Idx) {
116 auto &MO = MI.getOperand(Idx);
117 if (!MO.isReg() || !MO.getReg())
118 OpRegBankIdx[Idx] = PMI_None;
119 else
120 OpRegBankIdx[Idx] = getPartialMappingIdx(MRI.getType(MO.getReg()), isFP);
121 }
122 }
123
getInstrValueMapping(const MachineInstr & MI,const SmallVectorImpl<PartialMappingIdx> & OpRegBankIdx,SmallVectorImpl<const ValueMapping * > & OpdsMapping)124 bool X86RegisterBankInfo::getInstrValueMapping(
125 const MachineInstr &MI,
126 const SmallVectorImpl<PartialMappingIdx> &OpRegBankIdx,
127 SmallVectorImpl<const ValueMapping *> &OpdsMapping) {
128
129 unsigned NumOperands = MI.getNumOperands();
130 for (unsigned Idx = 0; Idx < NumOperands; ++Idx) {
131 if (!MI.getOperand(Idx).isReg())
132 continue;
133 if (!MI.getOperand(Idx).getReg())
134 continue;
135
136 auto Mapping = getValueMapping(OpRegBankIdx[Idx], 1);
137 if (!Mapping->isValid())
138 return false;
139
140 OpdsMapping[Idx] = Mapping;
141 }
142 return true;
143 }
144
145 const RegisterBankInfo::InstructionMapping &
getSameOperandsMapping(const MachineInstr & MI,bool isFP) const146 X86RegisterBankInfo::getSameOperandsMapping(const MachineInstr &MI,
147 bool isFP) const {
148 const MachineFunction &MF = *MI.getParent()->getParent();
149 const MachineRegisterInfo &MRI = MF.getRegInfo();
150
151 unsigned NumOperands = MI.getNumOperands();
152 LLT Ty = MRI.getType(MI.getOperand(0).getReg());
153
154 if (NumOperands != 3 || (Ty != MRI.getType(MI.getOperand(1).getReg())) ||
155 (Ty != MRI.getType(MI.getOperand(2).getReg())))
156 llvm_unreachable("Unsupported operand mapping yet.");
157
158 auto Mapping = getValueMapping(getPartialMappingIdx(Ty, isFP), 3);
159 return getInstructionMapping(DefaultMappingID, 1, Mapping, NumOperands);
160 }
161
162 const RegisterBankInfo::InstructionMapping &
getInstrMapping(const MachineInstr & MI) const163 X86RegisterBankInfo::getInstrMapping(const MachineInstr &MI) const {
164 const MachineFunction &MF = *MI.getParent()->getParent();
165 const MachineRegisterInfo &MRI = MF.getRegInfo();
166 unsigned Opc = MI.getOpcode();
167
168 // Try the default logic for non-generic instructions that are either copies
169 // or already have some operands assigned to banks.
170 if (!isPreISelGenericOpcode(Opc) || Opc == TargetOpcode::G_PHI) {
171 const InstructionMapping &Mapping = getInstrMappingImpl(MI);
172 if (Mapping.isValid())
173 return Mapping;
174 }
175
176 switch (Opc) {
177 case TargetOpcode::G_ADD:
178 case TargetOpcode::G_SUB:
179 case TargetOpcode::G_MUL:
180 return getSameOperandsMapping(MI, false);
181 case TargetOpcode::G_FADD:
182 case TargetOpcode::G_FSUB:
183 case TargetOpcode::G_FMUL:
184 case TargetOpcode::G_FDIV:
185 return getSameOperandsMapping(MI, true);
186 case TargetOpcode::G_SHL:
187 case TargetOpcode::G_LSHR:
188 case TargetOpcode::G_ASHR: {
189 unsigned NumOperands = MI.getNumOperands();
190 LLT Ty = MRI.getType(MI.getOperand(0).getReg());
191
192 auto Mapping = getValueMapping(getPartialMappingIdx(Ty, false), 3);
193 return getInstructionMapping(DefaultMappingID, 1, Mapping, NumOperands);
194
195 }
196 default:
197 break;
198 }
199
200 unsigned NumOperands = MI.getNumOperands();
201 SmallVector<PartialMappingIdx, 4> OpRegBankIdx(NumOperands);
202
203 switch (Opc) {
204 case TargetOpcode::G_FPEXT:
205 case TargetOpcode::G_FPTRUNC:
206 case TargetOpcode::G_FCONSTANT:
207 // Instruction having only floating-point operands (all scalars in VECRReg)
208 getInstrPartialMappingIdxs(MI, MRI, /* isFP */ true, OpRegBankIdx);
209 break;
210 case TargetOpcode::G_SITOFP:
211 case TargetOpcode::G_FPTOSI: {
212 // Some of the floating-point instructions have mixed GPR and FP operands:
213 // fine-tune the computed mapping.
214 auto &Op0 = MI.getOperand(0);
215 auto &Op1 = MI.getOperand(1);
216 const LLT Ty0 = MRI.getType(Op0.getReg());
217 const LLT Ty1 = MRI.getType(Op1.getReg());
218
219 bool FirstArgIsFP = Opc == TargetOpcode::G_SITOFP;
220 bool SecondArgIsFP = Opc == TargetOpcode::G_FPTOSI;
221 OpRegBankIdx[0] = getPartialMappingIdx(Ty0, /* isFP */ FirstArgIsFP);
222 OpRegBankIdx[1] = getPartialMappingIdx(Ty1, /* isFP */ SecondArgIsFP);
223 break;
224 }
225 case TargetOpcode::G_FCMP: {
226 LLT Ty1 = MRI.getType(MI.getOperand(2).getReg());
227 LLT Ty2 = MRI.getType(MI.getOperand(3).getReg());
228 (void)Ty2;
229 assert(Ty1.getSizeInBits() == Ty2.getSizeInBits() &&
230 "Mismatched operand sizes for G_FCMP");
231
232 unsigned Size = Ty1.getSizeInBits();
233 (void)Size;
234 assert((Size == 32 || Size == 64) && "Unsupported size for G_FCMP");
235
236 auto FpRegBank = getPartialMappingIdx(Ty1, /* isFP */ true);
237 OpRegBankIdx = {PMI_GPR8,
238 /* Predicate */ PMI_None, FpRegBank, FpRegBank};
239 break;
240 }
241 case TargetOpcode::G_TRUNC:
242 case TargetOpcode::G_ANYEXT: {
243 auto &Op0 = MI.getOperand(0);
244 auto &Op1 = MI.getOperand(1);
245 const LLT Ty0 = MRI.getType(Op0.getReg());
246 const LLT Ty1 = MRI.getType(Op1.getReg());
247
248 bool isFPTrunc = (Ty0.getSizeInBits() == 32 || Ty0.getSizeInBits() == 64) &&
249 Ty1.getSizeInBits() == 128 && Opc == TargetOpcode::G_TRUNC;
250 bool isFPAnyExt =
251 Ty0.getSizeInBits() == 128 &&
252 (Ty1.getSizeInBits() == 32 || Ty1.getSizeInBits() == 64) &&
253 Opc == TargetOpcode::G_ANYEXT;
254
255 getInstrPartialMappingIdxs(MI, MRI, /* isFP */ isFPTrunc || isFPAnyExt,
256 OpRegBankIdx);
257 } break;
258 default:
259 // Track the bank of each register, use NotFP mapping (all scalars in GPRs)
260 getInstrPartialMappingIdxs(MI, MRI, /* isFP */ false, OpRegBankIdx);
261 break;
262 }
263
264 // Finally construct the computed mapping.
265 SmallVector<const ValueMapping *, 8> OpdsMapping(NumOperands);
266 if (!getInstrValueMapping(MI, OpRegBankIdx, OpdsMapping))
267 return getInvalidInstructionMapping();
268
269 return getInstructionMapping(DefaultMappingID, /* Cost */ 1,
270 getOperandsMapping(OpdsMapping), NumOperands);
271 }
272
applyMappingImpl(const OperandsMapper & OpdMapper) const273 void X86RegisterBankInfo::applyMappingImpl(
274 const OperandsMapper &OpdMapper) const {
275 return applyDefaultMapping(OpdMapper);
276 }
277
278 RegisterBankInfo::InstructionMappings
getInstrAlternativeMappings(const MachineInstr & MI) const279 X86RegisterBankInfo::getInstrAlternativeMappings(const MachineInstr &MI) const {
280
281 const MachineFunction &MF = *MI.getParent()->getParent();
282 const TargetSubtargetInfo &STI = MF.getSubtarget();
283 const TargetRegisterInfo &TRI = *STI.getRegisterInfo();
284 const MachineRegisterInfo &MRI = MF.getRegInfo();
285
286 switch (MI.getOpcode()) {
287 case TargetOpcode::G_LOAD:
288 case TargetOpcode::G_STORE:
289 case TargetOpcode::G_IMPLICIT_DEF: {
290 // we going to try to map 32/64 bit to PMI_FP32/PMI_FP64
291 unsigned Size = getSizeInBits(MI.getOperand(0).getReg(), MRI, TRI);
292 if (Size != 32 && Size != 64)
293 break;
294
295 unsigned NumOperands = MI.getNumOperands();
296
297 // Track the bank of each register, use FP mapping (all scalars in VEC)
298 SmallVector<PartialMappingIdx, 4> OpRegBankIdx(NumOperands);
299 getInstrPartialMappingIdxs(MI, MRI, /* isFP */ true, OpRegBankIdx);
300
301 // Finally construct the computed mapping.
302 SmallVector<const ValueMapping *, 8> OpdsMapping(NumOperands);
303 if (!getInstrValueMapping(MI, OpRegBankIdx, OpdsMapping))
304 break;
305
306 const RegisterBankInfo::InstructionMapping &Mapping = getInstructionMapping(
307 /*ID*/ 1, /*Cost*/ 1, getOperandsMapping(OpdsMapping), NumOperands);
308 InstructionMappings AltMappings;
309 AltMappings.push_back(&Mapping);
310 return AltMappings;
311 }
312 default:
313 break;
314 }
315 return RegisterBankInfo::getInstrAlternativeMappings(MI);
316 }