//===- AArch64RegisterBankInfo.cpp ----------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
/// \file
/// This file implements the targeting of the RegisterBankInfo class for
/// AArch64.
/// \todo This should be generated by TableGen.
//===----------------------------------------------------------------------===//

#include "AArch64RegisterBankInfo.h"
#include "AArch64InstrInfo.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/CodeGen/GlobalISel/RegisterBank.h"
#include "llvm/CodeGen/GlobalISel/RegisterBankInfo.h"
#include "llvm/CodeGen/LowLevelType.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/TargetOpcodes.h"
#include "llvm/CodeGen/TargetRegisterInfo.h"
#include "llvm/CodeGen/TargetSubtargetInfo.h"
#include "llvm/Support/ErrorHandling.h"
#include <algorithm>
#include <cassert>

#define GET_TARGET_REGBANK_IMPL
#include "AArch64GenRegisterBank.inc"

// This file will be TableGen'ed at some point.
#include "AArch64GenRegisterBankInfo.def"

using namespace llvm;

AArch64RegisterBankInfo::AArch64RegisterBankInfo(const TargetRegisterInfo &TRI)
    : AArch64GenRegisterBankInfo() {
  static bool AlreadyInit = false;
  // We have only one set of register banks, whatever the subtarget
  // is. Therefore, the initialization of the RegBanks table should be
  // done only once. Indeed, the table of all register banks
  // (AArch64::RegBanks) is unique in the compiler. At some point, it
  // will get tablegen'ed and the whole constructor will become empty.
  if (AlreadyInit)
    return;
  AlreadyInit = true;

  const RegisterBank &RBGPR = getRegBank(AArch64::GPRRegBankID);
  (void)RBGPR;
  assert(&AArch64::GPRRegBank == &RBGPR &&
         "The order in RegBanks is messed up");

  const RegisterBank &RBFPR = getRegBank(AArch64::FPRRegBankID);
  (void)RBFPR;
  assert(&AArch64::FPRRegBank == &RBFPR &&
         "The order in RegBanks is messed up");

  const RegisterBank &RBCCR = getRegBank(AArch64::CCRegBankID);
  (void)RBCCR;
  assert(&AArch64::CCRegBank == &RBCCR && "The order in RegBanks is messed up");

  // The GPR register bank is fully defined by all the registers in
  // GPR64all + its subclasses.
  assert(RBGPR.covers(*TRI.getRegClass(AArch64::GPR32RegClassID)) &&
         "Subclass not added?");
  assert(RBGPR.getSize() == 64 && "GPRs should hold up to 64-bit");

  // The FPR register bank is fully defined by all the registers in
  // FPR64/FPR128 + their subclasses and the D/Q register tuples (up to QQQQ).
  assert(RBFPR.covers(*TRI.getRegClass(AArch64::QQRegClassID)) &&
         "Subclass not added?");
  assert(RBFPR.covers(*TRI.getRegClass(AArch64::FPR64RegClassID)) &&
         "Subclass not added?");
  assert(RBFPR.getSize() == 512 &&
         "FPRs should hold up to 512-bit via QQQQ sequence");

  assert(RBCCR.covers(*TRI.getRegClass(AArch64::CCRRegClassID)) &&
         "Class not added?");
  assert(RBCCR.getSize() == 32 && "CCR should hold up to 32-bit");

  // Check that the TableGen'ed-like file is in sync with our expectations.
  // First, the Idx.
  assert(checkPartialMappingIdx(PMI_FirstGPR, PMI_LastGPR,
                                {PMI_GPR32, PMI_GPR64}) &&
         "PartialMappingIdx's are incorrectly ordered");
  assert(checkPartialMappingIdx(PMI_FirstFPR, PMI_LastFPR,
                                {PMI_FPR16, PMI_FPR32, PMI_FPR64, PMI_FPR128,
                                 PMI_FPR256, PMI_FPR512}) &&
         "PartialMappingIdx's are incorrectly ordered");
// Now, the content.
// Check partial mapping.
#define CHECK_PARTIALMAP(Idx, ValStartIdx, ValLength, RB)                      \
  do {                                                                         \
    assert(                                                                    \
        checkPartialMap(PartialMappingIdx::Idx, ValStartIdx, ValLength, RB) && \
        #Idx " is incorrectly initialized");                                   \
  } while (false)

  CHECK_PARTIALMAP(PMI_GPR32, 0, 32, RBGPR);
  CHECK_PARTIALMAP(PMI_GPR64, 0, 64, RBGPR);
  CHECK_PARTIALMAP(PMI_FPR16, 0, 16, RBFPR);
  CHECK_PARTIALMAP(PMI_FPR32, 0, 32, RBFPR);
  CHECK_PARTIALMAP(PMI_FPR64, 0, 64, RBFPR);
  CHECK_PARTIALMAP(PMI_FPR128, 0, 128, RBFPR);
  CHECK_PARTIALMAP(PMI_FPR256, 0, 256, RBFPR);
  CHECK_PARTIALMAP(PMI_FPR512, 0, 512, RBFPR);
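
  // Illustrative note (not from the generated .def file): each partial mapping
  // checked above is, roughly, a (StartIdx, Length, RegBank) triple. For
  // instance, PMI_FPR64 is expected to describe bits [0, 64) of a value living
  // entirely in the FPR bank, which is exactly what
  // CHECK_PARTIALMAP(PMI_FPR64, 0, 64, RBFPR) asserts.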

// Check value mapping.
#define CHECK_VALUEMAP_IMPL(RBName, Size, Offset)                              \
  do {                                                                         \
    assert(checkValueMapImpl(PartialMappingIdx::PMI_##RBName##Size,            \
                             PartialMappingIdx::PMI_First##RBName, Size,       \
                             Offset) &&                                        \
           #RBName #Size " " #Offset " is incorrectly initialized");           \
  } while (false)

#define CHECK_VALUEMAP(RBName, Size) CHECK_VALUEMAP_IMPL(RBName, Size, 0)

  CHECK_VALUEMAP(GPR, 32);
  CHECK_VALUEMAP(GPR, 64);
  CHECK_VALUEMAP(FPR, 16);
  CHECK_VALUEMAP(FPR, 32);
  CHECK_VALUEMAP(FPR, 64);
  CHECK_VALUEMAP(FPR, 128);
  CHECK_VALUEMAP(FPR, 256);
  CHECK_VALUEMAP(FPR, 512);
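
  // Illustrative note: each value mapping checked above is expected to be a
  // single-entry breakdown, i.e. the whole value is covered by exactly one
  // partial mapping (NumBreakDowns == 1) from the matching register bank.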

// Check the value mapping for 3-operands instructions where all the operands
// map to the same value mapping.
#define CHECK_VALUEMAP_3OPS(RBName, Size)                                      \
  do {                                                                         \
    CHECK_VALUEMAP_IMPL(RBName, Size, 0);                                      \
    CHECK_VALUEMAP_IMPL(RBName, Size, 1);                                      \
    CHECK_VALUEMAP_IMPL(RBName, Size, 2);                                      \
  } while (false)

  CHECK_VALUEMAP_3OPS(GPR, 32);
  CHECK_VALUEMAP_3OPS(GPR, 64);
  CHECK_VALUEMAP_3OPS(FPR, 32);
  CHECK_VALUEMAP_3OPS(FPR, 64);
  CHECK_VALUEMAP_3OPS(FPR, 128);
  CHECK_VALUEMAP_3OPS(FPR, 256);
  CHECK_VALUEMAP_3OPS(FPR, 512);

#define CHECK_VALUEMAP_CROSSREGCPY(RBNameDst, RBNameSrc, Size)                 \
  do {                                                                         \
    unsigned PartialMapDstIdx = PMI_##RBNameDst##Size - PMI_Min;               \
    unsigned PartialMapSrcIdx = PMI_##RBNameSrc##Size - PMI_Min;               \
    (void)PartialMapDstIdx;                                                    \
    (void)PartialMapSrcIdx;                                                    \
    const ValueMapping *Map = getCopyMapping(                                  \
        AArch64::RBNameDst##RegBankID, AArch64::RBNameSrc##RegBankID, Size);  \
    (void)Map;                                                                 \
    assert(Map[0].BreakDown ==                                                 \
               &AArch64GenRegisterBankInfo::PartMappings[PartialMapDstIdx] &&  \
           Map[0].NumBreakDowns == 1 && #RBNameDst #Size                       \
           " Dst is incorrectly initialized");                                 \
    assert(Map[1].BreakDown ==                                                 \
               &AArch64GenRegisterBankInfo::PartMappings[PartialMapSrcIdx] &&  \
           Map[1].NumBreakDowns == 1 && #RBNameSrc #Size                       \
           " Src is incorrectly initialized");                                 \
                                                                               \
  } while (false)

  CHECK_VALUEMAP_CROSSREGCPY(GPR, GPR, 32);
  CHECK_VALUEMAP_CROSSREGCPY(GPR, FPR, 32);
  CHECK_VALUEMAP_CROSSREGCPY(GPR, GPR, 64);
  CHECK_VALUEMAP_CROSSREGCPY(GPR, FPR, 64);
  CHECK_VALUEMAP_CROSSREGCPY(FPR, FPR, 32);
  CHECK_VALUEMAP_CROSSREGCPY(FPR, GPR, 32);
  CHECK_VALUEMAP_CROSSREGCPY(FPR, FPR, 64);
  CHECK_VALUEMAP_CROSSREGCPY(FPR, GPR, 64);
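
  // Illustrative note: a copy mapping is laid out as a two-entry array,
  // Map[0] for the definition and Map[1] for the use, each with a single
  // breakdown. E.g. getCopyMapping(GPR, FPR, 32) is expected to describe a
  // 32-bit GPR def fed by a 32-bit FPR use, which is what the cross-bank
  // COPY/G_BITCAST mappings further down rely on.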

#define CHECK_VALUEMAP_FPEXT(DstSize, SrcSize)                                 \
  do {                                                                         \
    unsigned PartialMapDstIdx = PMI_FPR##DstSize - PMI_Min;                    \
    unsigned PartialMapSrcIdx = PMI_FPR##SrcSize - PMI_Min;                    \
    (void)PartialMapDstIdx;                                                    \
    (void)PartialMapSrcIdx;                                                    \
    const ValueMapping *Map = getFPExtMapping(DstSize, SrcSize);               \
    (void)Map;                                                                 \
    assert(Map[0].BreakDown ==                                                 \
               &AArch64GenRegisterBankInfo::PartMappings[PartialMapDstIdx] &&  \
           Map[0].NumBreakDowns == 1 && "FPR" #DstSize                         \
                                        " Dst is incorrectly initialized");    \
    assert(Map[1].BreakDown ==                                                 \
               &AArch64GenRegisterBankInfo::PartMappings[PartialMapSrcIdx] &&  \
           Map[1].NumBreakDowns == 1 && "FPR" #SrcSize                         \
                                        " Src is incorrectly initialized");    \
                                                                               \
  } while (false)

  CHECK_VALUEMAP_FPEXT(32, 16);
  CHECK_VALUEMAP_FPEXT(64, 16);
  CHECK_VALUEMAP_FPEXT(64, 32);
  CHECK_VALUEMAP_FPEXT(128, 64);

  assert(verify(TRI) && "Invalid register bank information");
}

unsigned AArch64RegisterBankInfo::copyCost(const RegisterBank &A,
                                           const RegisterBank &B,
                                           unsigned Size) const {
  // What do we do with different sizes?
  // Copies are assumed to be of the same size.
  // We will introduce other hooks for different sizes:
  // * extract cost.
  // * build_sequence cost.

  // Copy from (resp. to) GPR to (resp. from) FPR involves FMOV.
  // FIXME: This should be deduced from the scheduling model.
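  // Rough intuition (illustrative): the instruction mappings built in this
  // file use a cost of 1, so returning 4 or 5 here makes a GPR<->FPR crossing
  // look several times more expensive to the greedy RegBankSelect mode, which
  // then tends to remap instructions rather than insert FMOVs.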
  if (&A == &AArch64::GPRRegBank && &B == &AArch64::FPRRegBank)
    // FMOVXDr or FMOVWSr.
    return 5;
  if (&A == &AArch64::FPRRegBank && &B == &AArch64::GPRRegBank)
    // FMOVDXr or FMOVSWr.
    return 4;

  return RegisterBankInfo::copyCost(A, B, Size);
}

const RegisterBank &AArch64RegisterBankInfo::getRegBankFromRegClass(
    const TargetRegisterClass &RC) const {
  switch (RC.getID()) {
  case AArch64::FPR8RegClassID:
  case AArch64::FPR16RegClassID:
  case AArch64::FPR32RegClassID:
  case AArch64::FPR64RegClassID:
  case AArch64::FPR128RegClassID:
  case AArch64::FPR128_loRegClassID:
  case AArch64::DDRegClassID:
  case AArch64::DDDRegClassID:
  case AArch64::DDDDRegClassID:
  case AArch64::QQRegClassID:
  case AArch64::QQQRegClassID:
  case AArch64::QQQQRegClassID:
    return getRegBank(AArch64::FPRRegBankID);
  case AArch64::GPR32commonRegClassID:
  case AArch64::GPR32RegClassID:
  case AArch64::GPR32spRegClassID:
  case AArch64::GPR32sponlyRegClassID:
  case AArch64::GPR32argRegClassID:
  case AArch64::GPR32allRegClassID:
  case AArch64::GPR64commonRegClassID:
  case AArch64::GPR64RegClassID:
  case AArch64::GPR64spRegClassID:
  case AArch64::GPR64sponlyRegClassID:
  case AArch64::GPR64argRegClassID:
  case AArch64::GPR64allRegClassID:
  case AArch64::GPR64noipRegClassID:
  case AArch64::GPR64common_and_GPR64noipRegClassID:
  case AArch64::GPR64noip_and_tcGPR64RegClassID:
  case AArch64::tcGPR64RegClassID:
  case AArch64::WSeqPairsClassRegClassID:
  case AArch64::XSeqPairsClassRegClassID:
    return getRegBank(AArch64::GPRRegBankID);
  case AArch64::CCRRegClassID:
    return getRegBank(AArch64::CCRegBankID);
  default:
    llvm_unreachable("Register class not supported");
  }
}

RegisterBankInfo::InstructionMappings
AArch64RegisterBankInfo::getInstrAlternativeMappings(
    const MachineInstr &MI) const {
  const MachineFunction &MF = *MI.getParent()->getParent();
  const TargetSubtargetInfo &STI = MF.getSubtarget();
  const TargetRegisterInfo &TRI = *STI.getRegisterInfo();
  const MachineRegisterInfo &MRI = MF.getRegInfo();

  switch (MI.getOpcode()) {
  case TargetOpcode::G_OR: {
    // A 32 or 64-bit OR can be mapped to either FPR or
    // GPR for the same cost.
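    // Illustrative MIR (hypothetical virtual registers): both
    //   %res:gpr(s32) = G_OR %a:gpr(s32), %b:gpr(s32)
    // and
    //   %res:fpr(s32) = G_OR %a:fpr(s32), %b:fpr(s32)
    // are reported as alternatives with the same cost below.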
    unsigned Size = getSizeInBits(MI.getOperand(0).getReg(), MRI, TRI);
    if (Size != 32 && Size != 64)
      break;

    // If the instruction has any implicit-defs or uses,
    // do not mess with it.
    if (MI.getNumOperands() != 3)
      break;
    InstructionMappings AltMappings;
    const InstructionMapping &GPRMapping = getInstructionMapping(
        /*ID*/ 1, /*Cost*/ 1, getValueMapping(PMI_FirstGPR, Size),
        /*NumOperands*/ 3);
    const InstructionMapping &FPRMapping = getInstructionMapping(
        /*ID*/ 2, /*Cost*/ 1, getValueMapping(PMI_FirstFPR, Size),
        /*NumOperands*/ 3);

    AltMappings.push_back(&GPRMapping);
    AltMappings.push_back(&FPRMapping);
    return AltMappings;
  }
  case TargetOpcode::G_BITCAST: {
    unsigned Size = getSizeInBits(MI.getOperand(0).getReg(), MRI, TRI);
    if (Size != 32 && Size != 64)
      break;

    // If the instruction has any implicit-defs or uses,
    // do not mess with it.
    if (MI.getNumOperands() != 2)
      break;

    InstructionMappings AltMappings;
    const InstructionMapping &GPRMapping = getInstructionMapping(
        /*ID*/ 1, /*Cost*/ 1,
        getCopyMapping(AArch64::GPRRegBankID, AArch64::GPRRegBankID, Size),
        /*NumOperands*/ 2);
    const InstructionMapping &FPRMapping = getInstructionMapping(
        /*ID*/ 2, /*Cost*/ 1,
        getCopyMapping(AArch64::FPRRegBankID, AArch64::FPRRegBankID, Size),
        /*NumOperands*/ 2);
    const InstructionMapping &GPRToFPRMapping = getInstructionMapping(
        /*ID*/ 3,
        /*Cost*/ copyCost(AArch64::GPRRegBank, AArch64::FPRRegBank, Size),
        getCopyMapping(AArch64::FPRRegBankID, AArch64::GPRRegBankID, Size),
        /*NumOperands*/ 2);
    const InstructionMapping &FPRToGPRMapping = getInstructionMapping(
        /*ID*/ 3,
        /*Cost*/ copyCost(AArch64::GPRRegBank, AArch64::FPRRegBank, Size),
        getCopyMapping(AArch64::GPRRegBankID, AArch64::FPRRegBankID, Size),
        /*NumOperands*/ 2);

    AltMappings.push_back(&GPRMapping);
    AltMappings.push_back(&FPRMapping);
    AltMappings.push_back(&GPRToFPRMapping);
    AltMappings.push_back(&FPRToGPRMapping);
    return AltMappings;
  }
  case TargetOpcode::G_LOAD: {
    unsigned Size = getSizeInBits(MI.getOperand(0).getReg(), MRI, TRI);
    if (Size != 64)
      break;

    // If the instruction has any implicit-defs or uses,
    // do not mess with it.
    if (MI.getNumOperands() != 2)
      break;

    InstructionMappings AltMappings;
    const InstructionMapping &GPRMapping = getInstructionMapping(
        /*ID*/ 1, /*Cost*/ 1,
        getOperandsMapping({getValueMapping(PMI_FirstGPR, Size),
                            // Addresses are GPR 64-bit.
                            getValueMapping(PMI_FirstGPR, 64)}),
        /*NumOperands*/ 2);
    const InstructionMapping &FPRMapping = getInstructionMapping(
        /*ID*/ 2, /*Cost*/ 1,
        getOperandsMapping({getValueMapping(PMI_FirstFPR, Size),
                            // Addresses are GPR 64-bit.
                            getValueMapping(PMI_FirstGPR, 64)}),
        /*NumOperands*/ 2);

    AltMappings.push_back(&GPRMapping);
    AltMappings.push_back(&FPRMapping);
    return AltMappings;
  }
  default:
    break;
  }
  return RegisterBankInfo::getInstrAlternativeMappings(MI);
}

void AArch64RegisterBankInfo::applyMappingImpl(
    const OperandsMapper &OpdMapper) const {
  switch (OpdMapper.getMI().getOpcode()) {
  case TargetOpcode::G_OR:
  case TargetOpcode::G_BITCAST:
  case TargetOpcode::G_LOAD:
    // These IDs must match getInstrAlternativeMappings.
    assert((OpdMapper.getInstrMapping().getID() >= 1 &&
            OpdMapper.getInstrMapping().getID() <= 4) &&
           "Don't know how to handle that ID");
    return applyDefaultMapping(OpdMapper);
  default:
    llvm_unreachable("Don't know how to handle that operation");
  }
}

/// Returns whether opcode \p Opc is a pre-isel generic floating-point opcode,
/// having only floating-point operands.
static bool isPreISelGenericFloatingPointOpcode(unsigned Opc) {
  switch (Opc) {
  case TargetOpcode::G_FADD:
  case TargetOpcode::G_FSUB:
  case TargetOpcode::G_FMUL:
  case TargetOpcode::G_FMA:
  case TargetOpcode::G_FDIV:
  case TargetOpcode::G_FCONSTANT:
  case TargetOpcode::G_FPEXT:
  case TargetOpcode::G_FPTRUNC:
  case TargetOpcode::G_FCEIL:
  case TargetOpcode::G_FFLOOR:
  case TargetOpcode::G_FNEARBYINT:
  case TargetOpcode::G_FNEG:
  case TargetOpcode::G_FCOS:
  case TargetOpcode::G_FSIN:
  case TargetOpcode::G_FLOG10:
  case TargetOpcode::G_FLOG:
  case TargetOpcode::G_FLOG2:
  case TargetOpcode::G_FSQRT:
  case TargetOpcode::G_FABS:
  case TargetOpcode::G_FEXP:
  case TargetOpcode::G_FRINT:
  case TargetOpcode::G_INTRINSIC_TRUNC:
  case TargetOpcode::G_INTRINSIC_ROUND:
    return true;
  }
  return false;
}

const RegisterBankInfo::InstructionMapping &
AArch64RegisterBankInfo::getSameKindOfOperandsMapping(
    const MachineInstr &MI) const {
  const unsigned Opc = MI.getOpcode();
  const MachineFunction &MF = *MI.getParent()->getParent();
  const MachineRegisterInfo &MRI = MF.getRegInfo();

  unsigned NumOperands = MI.getNumOperands();
  assert(NumOperands <= 3 &&
         "This code is for instructions with 3 or fewer operands");

  LLT Ty = MRI.getType(MI.getOperand(0).getReg());
  unsigned Size = Ty.getSizeInBits();
  bool IsFPR = Ty.isVector() || isPreISelGenericFloatingPointOpcode(Opc);

  PartialMappingIdx RBIdx = IsFPR ? PMI_FirstFPR : PMI_FirstGPR;

#ifndef NDEBUG
  // Make sure all the operands are using similar size and type.
  // Should probably be checked by the machine verifier.
  // This code won't catch cases where the number of lanes is
  // different between the operands.
  // If we want to go to that level of detail, it is probably
  // best to check that the types are the same, period.
  // Currently, we just check that the register banks are the same
  // for each type.
  for (unsigned Idx = 1; Idx != NumOperands; ++Idx) {
    LLT OpTy = MRI.getType(MI.getOperand(Idx).getReg());
    assert(
        AArch64GenRegisterBankInfo::getRegBankBaseIdxOffset(
            RBIdx, OpTy.getSizeInBits()) ==
            AArch64GenRegisterBankInfo::getRegBankBaseIdxOffset(RBIdx, Size) &&
        "Operand has incompatible size");
    bool OpIsFPR = OpTy.isVector() || isPreISelGenericFloatingPointOpcode(Opc);
    (void)OpIsFPR;
    assert(IsFPR == OpIsFPR && "Operand has incompatible type");
  }
#endif // End NDEBUG.

  return getInstructionMapping(DefaultMappingID, 1,
                               getValueMapping(RBIdx, Size), NumOperands);
}

bool AArch64RegisterBankInfo::hasFPConstraints(
    const MachineInstr &MI, const MachineRegisterInfo &MRI,
    const TargetRegisterInfo &TRI) const {
  unsigned Op = MI.getOpcode();

  // Do we have an explicit floating point instruction?
  if (isPreISelGenericFloatingPointOpcode(Op))
    return true;

  // No. Check if we have a copy-like instruction. If we do, then we could
  // still be fed by floating point instructions.
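  // Illustrative example (hypothetical MIR): for
  //   %f:fpr(s32) = COPY $s0
  // the copy's definition is already on FPR, so the check below reports an FP
  // constraint even though COPY itself is not a floating-point opcode.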
  if (Op != TargetOpcode::COPY && !MI.isPHI())
    return false;

  // MI is copy-like. Return true if it outputs an FPR.
  return getRegBank(MI.getOperand(0).getReg(), MRI, TRI) ==
         &AArch64::FPRRegBank;
}

bool AArch64RegisterBankInfo::onlyUsesFP(const MachineInstr &MI,
                                         const MachineRegisterInfo &MRI,
                                         const TargetRegisterInfo &TRI) const {
  switch (MI.getOpcode()) {
  case TargetOpcode::G_FPTOSI:
  case TargetOpcode::G_FPTOUI:
  case TargetOpcode::G_FCMP:
    return true;
  default:
    break;
  }
  return hasFPConstraints(MI, MRI, TRI);
}

bool AArch64RegisterBankInfo::onlyDefinesFP(
    const MachineInstr &MI, const MachineRegisterInfo &MRI,
    const TargetRegisterInfo &TRI) const {
  switch (MI.getOpcode()) {
  case TargetOpcode::G_SITOFP:
  case TargetOpcode::G_UITOFP:
  case TargetOpcode::G_EXTRACT_VECTOR_ELT:
  case TargetOpcode::G_INSERT_VECTOR_ELT:
    return true;
  default:
    break;
  }
  return hasFPConstraints(MI, MRI, TRI);
}

const RegisterBankInfo::InstructionMapping &
AArch64RegisterBankInfo::getInstrMapping(const MachineInstr &MI) const {
  const unsigned Opc = MI.getOpcode();

  // Try the default logic for non-generic instructions that are either copies
  // or already have some operands assigned to banks.
  if ((Opc != TargetOpcode::COPY && !isPreISelGenericOpcode(Opc)) ||
      Opc == TargetOpcode::G_PHI) {
    const RegisterBankInfo::InstructionMapping &Mapping =
        getInstrMappingImpl(MI);
    if (Mapping.isValid())
      return Mapping;
  }

  const MachineFunction &MF = *MI.getParent()->getParent();
  const MachineRegisterInfo &MRI = MF.getRegInfo();
  const TargetSubtargetInfo &STI = MF.getSubtarget();
  const TargetRegisterInfo &TRI = *STI.getRegisterInfo();

  switch (Opc) {
    // G_{F|S|U}REM are not listed because they are not legal.
    // Arithmetic ops.
  case TargetOpcode::G_ADD:
  case TargetOpcode::G_SUB:
  case TargetOpcode::G_GEP:
  case TargetOpcode::G_MUL:
  case TargetOpcode::G_SDIV:
  case TargetOpcode::G_UDIV:
    // Bitwise ops.
  case TargetOpcode::G_AND:
  case TargetOpcode::G_OR:
  case TargetOpcode::G_XOR:
    // Floating point ops.
  case TargetOpcode::G_FADD:
  case TargetOpcode::G_FSUB:
  case TargetOpcode::G_FMUL:
  case TargetOpcode::G_FDIV:
    return getSameKindOfOperandsMapping(MI);
  case TargetOpcode::G_FPEXT: {
    LLT DstTy = MRI.getType(MI.getOperand(0).getReg());
    LLT SrcTy = MRI.getType(MI.getOperand(1).getReg());
    return getInstructionMapping(
        DefaultMappingID, /*Cost*/ 1,
        getFPExtMapping(DstTy.getSizeInBits(), SrcTy.getSizeInBits()),
        /*NumOperands*/ 2);
  }
    // Shifts.
  case TargetOpcode::G_SHL:
  case TargetOpcode::G_LSHR:
  case TargetOpcode::G_ASHR: {
    LLT ShiftAmtTy = MRI.getType(MI.getOperand(2).getReg());
    LLT SrcTy = MRI.getType(MI.getOperand(1).getReg());
    if (ShiftAmtTy.getSizeInBits() == 64 && SrcTy.getSizeInBits() == 32)
      return getInstructionMapping(DefaultMappingID, 1,
                                   &ValMappings[Shift64Imm], 3);
    return getSameKindOfOperandsMapping(MI);
  }
  case TargetOpcode::COPY: {
    unsigned DstReg = MI.getOperand(0).getReg();
    unsigned SrcReg = MI.getOperand(1).getReg();
    // Check if one of the registers is not a generic register.
    if ((TargetRegisterInfo::isPhysicalRegister(DstReg) ||
         !MRI.getType(DstReg).isValid()) ||
        (TargetRegisterInfo::isPhysicalRegister(SrcReg) ||
         !MRI.getType(SrcReg).isValid())) {
      const RegisterBank *DstRB = getRegBank(DstReg, MRI, TRI);
      const RegisterBank *SrcRB = getRegBank(SrcReg, MRI, TRI);
      if (!DstRB)
        DstRB = SrcRB;
      else if (!SrcRB)
        SrcRB = DstRB;
      // If both RBs are null, that means both registers are generic.
      // We shouldn't be here.
      assert(DstRB && SrcRB && "Both RegBank were nullptr");
      unsigned Size = getSizeInBits(DstReg, MRI, TRI);
      return getInstructionMapping(
          DefaultMappingID, copyCost(*DstRB, *SrcRB, Size),
          getCopyMapping(DstRB->getID(), SrcRB->getID(), Size),
          // We only care about the mapping of the destination.
          /*NumOperands*/ 1);
    }
    // Both registers are generic, use G_BITCAST.
    LLVM_FALLTHROUGH;
  }
  case TargetOpcode::G_BITCAST: {
    LLT DstTy = MRI.getType(MI.getOperand(0).getReg());
    LLT SrcTy = MRI.getType(MI.getOperand(1).getReg());
    unsigned Size = DstTy.getSizeInBits();
    bool DstIsGPR = !DstTy.isVector() && DstTy.getSizeInBits() <= 64;
    bool SrcIsGPR = !SrcTy.isVector() && SrcTy.getSizeInBits() <= 64;
    const RegisterBank &DstRB =
        DstIsGPR ? AArch64::GPRRegBank : AArch64::FPRRegBank;
    const RegisterBank &SrcRB =
        SrcIsGPR ? AArch64::GPRRegBank : AArch64::FPRRegBank;
    return getInstructionMapping(
        DefaultMappingID, copyCost(DstRB, SrcRB, Size),
        getCopyMapping(DstRB.getID(), SrcRB.getID(), Size),
        // We only care about the mapping of the destination for COPY.
        /*NumOperands*/ Opc == TargetOpcode::G_BITCAST ? 2 : 1);
  }
  default:
    break;
  }

  unsigned NumOperands = MI.getNumOperands();

  // Track the size and bank of each register.  We don't do partial mappings.
  SmallVector<unsigned, 4> OpSize(NumOperands);
  SmallVector<PartialMappingIdx, 4> OpRegBankIdx(NumOperands);
  for (unsigned Idx = 0; Idx < NumOperands; ++Idx) {
    auto &MO = MI.getOperand(Idx);
    if (!MO.isReg() || !MO.getReg())
      continue;

    LLT Ty = MRI.getType(MO.getReg());
    OpSize[Idx] = Ty.getSizeInBits();

    // As a top-level guess, vectors go in FPRs, scalars and pointers in GPRs.
    // For floating-point instructions, scalars go in FPRs.
    if (Ty.isVector() || isPreISelGenericFloatingPointOpcode(Opc) ||
        Ty.getSizeInBits() > 64)
      OpRegBankIdx[Idx] = PMI_FirstFPR;
    else
      OpRegBankIdx[Idx] = PMI_FirstGPR;
  }
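
  // Illustrative examples of the guess above (hypothetical types): a
  // <4 x s32> or s128 operand lands on FPR, while an s64 scalar or a p0
  // pointer lands on GPR unless the opcode itself is a floating-point one.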

  unsigned Cost = 1;
  // Some of the floating-point instructions have mixed GPR and FPR operands:
  // fine-tune the computed mapping.
  switch (Opc) {
  case TargetOpcode::G_SITOFP:
  case TargetOpcode::G_UITOFP:
    if (MRI.getType(MI.getOperand(0).getReg()).isVector())
      break;
    OpRegBankIdx = {PMI_FirstFPR, PMI_FirstGPR};
    break;
  case TargetOpcode::G_FPTOSI:
  case TargetOpcode::G_FPTOUI:
    if (MRI.getType(MI.getOperand(0).getReg()).isVector())
      break;
    OpRegBankIdx = {PMI_FirstGPR, PMI_FirstFPR};
    break;
  case TargetOpcode::G_FCMP:
    OpRegBankIdx = {PMI_FirstGPR,
                    /* Predicate */ PMI_None, PMI_FirstFPR, PMI_FirstFPR};
    break;
  case TargetOpcode::G_BITCAST:
    // This is going to be a cross register bank copy and this is expensive.
    if (OpRegBankIdx[0] != OpRegBankIdx[1])
      Cost = copyCost(
          *AArch64GenRegisterBankInfo::PartMappings[OpRegBankIdx[0]].RegBank,
          *AArch64GenRegisterBankInfo::PartMappings[OpRegBankIdx[1]].RegBank,
          OpSize[0]);
    break;
  case TargetOpcode::G_LOAD:
    // Loading in the vector unit is slightly more expensive.
    // This is actually only true for the LD1R and co instructions,
    // but anyway for the fast mode this number does not matter and
    // for the greedy mode the cost of the cross bank copy will
    // offset this number.
    // FIXME: Should be derived from the scheduling model.
    if (OpRegBankIdx[0] != PMI_FirstGPR)
      Cost = 2;
    else
      // Check if that load feeds fp instructions.
      // In that case, we want the default mapping to be on FPR
      // instead of blindly mapping every scalar to GPR.
      for (const MachineInstr &UseMI :
           MRI.use_instructions(MI.getOperand(0).getReg())) {
        // If we have at least one direct use in a FP instruction,
        // assume this was a floating point load in the IR.
        // If it was not, we would have had a bitcast before
        // reaching that instruction.
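        // Illustrative MIR (hypothetical): for
        //   %val:_(s64) = G_LOAD %addr(p0)
        //   %sum:_(s64) = G_FADD %val, %other
        // the G_FADD use flips the load's default mapping to FPR.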
        if (onlyUsesFP(UseMI, MRI, TRI)) {
          OpRegBankIdx[0] = PMI_FirstFPR;
          break;
        }
      }
    break;
  case TargetOpcode::G_STORE:
    // Check if that store is fed by fp instructions.
    if (OpRegBankIdx[0] == PMI_FirstGPR) {
      unsigned VReg = MI.getOperand(0).getReg();
      if (!VReg)
        break;
      MachineInstr *DefMI = MRI.getVRegDef(VReg);
      if (onlyDefinesFP(*DefMI, MRI, TRI))
        OpRegBankIdx[0] = PMI_FirstFPR;
      break;
    }
    break;
  case TargetOpcode::G_SELECT: {
    // If the destination is FPR, preserve that.
    if (OpRegBankIdx[0] != PMI_FirstGPR)
      break;

    // If we're taking in vectors, we have no choice but to put everything on
    // FPRs.
    LLT SrcTy = MRI.getType(MI.getOperand(2).getReg());
    if (SrcTy.isVector()) {
      for (unsigned Idx = 0; Idx < 4; ++Idx)
        OpRegBankIdx[Idx] = PMI_FirstFPR;
      break;
    }

    // Try to minimize the number of copies. If we have more floating point
    // constrained values than not, then we'll put everything on FPR. Otherwise,
    // everything has to be on GPR.
    unsigned NumFP = 0;

    // Check if the uses of the result always require floating point values.
    //
    // For example:
    //
    // %z = G_SELECT %cond %x %y
    // fpr = G_FOO %z ...
    if (any_of(
            MRI.use_instructions(MI.getOperand(0).getReg()),
            [&](MachineInstr &MI) { return onlyUsesFP(MI, MRI, TRI); }))
      ++NumFP;

    // Check if the defs of the source values always produce floating point
    // values.
    //
    // For example:
    //
    // %x = G_SOMETHING_ALWAYS_FLOAT %a ...
    // %z = G_SELECT %cond %x %y
    //
    // Also check whether or not the sources have already been decided to be
    // FPR. Keep track of this.
    //
    // This doesn't check the condition, since it's just whatever is in NZCV.
    // This isn't passed explicitly in a register to fcsel/csel.
    for (unsigned Idx = 2; Idx < 4; ++Idx) {
      unsigned VReg = MI.getOperand(Idx).getReg();
      MachineInstr *DefMI = MRI.getVRegDef(VReg);
      if (getRegBank(VReg, MRI, TRI) == &AArch64::FPRRegBank ||
          onlyDefinesFP(*DefMI, MRI, TRI))
        ++NumFP;
    }
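
    // Illustrative example (hypothetical MIR): if %x is defined by G_FADD and
    // %z is only used by G_FCMP, NumFP is at least 2 and the select is mapped
    // entirely onto FPR below.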

    // If we have more FP constraints than not, then move everything over to
    // FPR.
    if (NumFP >= 2)
      for (unsigned Idx = 0; Idx < 4; ++Idx)
        OpRegBankIdx[Idx] = PMI_FirstFPR;

    break;
  }
  case TargetOpcode::G_UNMERGE_VALUES: {
    // If the first operand belongs to an FPR register bank, then make sure
    // that we preserve that.
    if (OpRegBankIdx[0] != PMI_FirstGPR)
      break;

    LLT SrcTy = MRI.getType(MI.getOperand(MI.getNumOperands()-1).getReg());
    // UNMERGE into scalars from a vector should always use FPR.
    // Likewise if any of the uses are FP instructions.
    if (SrcTy.isVector() ||
        any_of(MRI.use_instructions(MI.getOperand(0).getReg()),
               [&](MachineInstr &MI) { return onlyUsesFP(MI, MRI, TRI); })) {
      // Set the register bank of every operand to FPR.
      for (unsigned Idx = 0, NumOperands = MI.getNumOperands();
           Idx < NumOperands; ++Idx)
        OpRegBankIdx[Idx] = PMI_FirstFPR;
    }
    break;
  }
  case TargetOpcode::G_EXTRACT_VECTOR_ELT:
    // Destination and source need to be FPRs.
    OpRegBankIdx[0] = PMI_FirstFPR;
    OpRegBankIdx[1] = PMI_FirstFPR;

    // Index needs to be a GPR.
    OpRegBankIdx[2] = PMI_FirstGPR;
    break;
  case TargetOpcode::G_INSERT_VECTOR_ELT:
    OpRegBankIdx[0] = PMI_FirstFPR;
    OpRegBankIdx[1] = PMI_FirstFPR;

    // The element may be either a GPR or FPR. Preserve that behaviour.
    if (getRegBank(MI.getOperand(2).getReg(), MRI, TRI) == &AArch64::FPRRegBank)
      OpRegBankIdx[2] = PMI_FirstFPR;
    else
      OpRegBankIdx[2] = PMI_FirstGPR;

    // Index needs to be a GPR.
    OpRegBankIdx[3] = PMI_FirstGPR;
    break;
  case TargetOpcode::G_BUILD_VECTOR:
    // If the first source operand belongs to an FPR register bank, then make
    // sure that we preserve that.
    if (OpRegBankIdx[1] != PMI_FirstGPR)
      break;
    unsigned VReg = MI.getOperand(1).getReg();
    if (!VReg)
      break;

    // Get the instruction that defined the source operand reg, and check if
    // it's a floating point operation. Or, if it's a type like s16 which
    // doesn't have an exact-size GPR register class.
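    // Illustrative example (hypothetical MIR): for
    //   %v:_(<8 x s16>) = G_BUILD_VECTOR %x:_(s16), ...
    // an s16 element has no exact-size GPR class, so every operand is pushed
    // onto FPR below.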
    MachineInstr *DefMI = MRI.getVRegDef(VReg);
    unsigned DefOpc = DefMI->getOpcode();
    const LLT SrcTy = MRI.getType(VReg);
    if (isPreISelGenericFloatingPointOpcode(DefOpc) ||
        SrcTy.getSizeInBits() < 32) {
      // Have a floating point op.
      // Make sure every operand gets mapped to an FPR register class.
      unsigned NumOperands = MI.getNumOperands();
      for (unsigned Idx = 0; Idx < NumOperands; ++Idx)
        OpRegBankIdx[Idx] = PMI_FirstFPR;
    }
    break;
  }

  // Finally construct the computed mapping.
  SmallVector<const ValueMapping *, 8> OpdsMapping(NumOperands);
  for (unsigned Idx = 0; Idx < NumOperands; ++Idx) {
    if (MI.getOperand(Idx).isReg() && MI.getOperand(Idx).getReg()) {
      auto Mapping = getValueMapping(OpRegBankIdx[Idx], OpSize[Idx]);
      if (!Mapping->isValid())
        return getInvalidInstructionMapping();

      OpdsMapping[Idx] = Mapping;
    }
  }

  return getInstructionMapping(DefaultMappingID, Cost,
                               getOperandsMapping(OpdsMapping), NumOperands);
}