1 //===- AArch64RegisterBankInfo.cpp ----------------------------------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 /// \file
9 /// This file implements the targeting of the RegisterBankInfo class for
10 /// AArch64.
11 /// \todo This should be generated by TableGen.
12 //===----------------------------------------------------------------------===//
13
14 #include "AArch64RegisterBankInfo.h"
15 #include "AArch64InstrInfo.h"
16 #include "AArch64RegisterInfo.h"
17 #include "MCTargetDesc/AArch64MCTargetDesc.h"
18 #include "llvm/ADT/STLExtras.h"
19 #include "llvm/ADT/SmallVector.h"
20 #include "llvm/CodeGen/GlobalISel/RegisterBank.h"
21 #include "llvm/CodeGen/GlobalISel/RegisterBankInfo.h"
22 #include "llvm/CodeGen/GlobalISel/Utils.h"
23 #include "llvm/CodeGen/LowLevelType.h"
24 #include "llvm/CodeGen/MachineFunction.h"
25 #include "llvm/CodeGen/MachineInstr.h"
26 #include "llvm/CodeGen/MachineOperand.h"
27 #include "llvm/CodeGen/MachineRegisterInfo.h"
28 #include "llvm/CodeGen/TargetOpcodes.h"
29 #include "llvm/CodeGen/TargetRegisterInfo.h"
30 #include "llvm/CodeGen/TargetSubtargetInfo.h"
31 #include "llvm/IR/IntrinsicsAArch64.h"
32 #include "llvm/Support/ErrorHandling.h"
33 #include <algorithm>
34 #include <cassert>
35
36 #define GET_TARGET_REGBANK_IMPL
37 #include "AArch64GenRegisterBank.inc"
38
39 // This file will be TableGen'ed at some point.
40 #include "AArch64GenRegisterBankInfo.def"
41
42 using namespace llvm;
43
AArch64RegisterBankInfo::AArch64RegisterBankInfo(const TargetRegisterInfo &TRI)
    : AArch64GenRegisterBankInfo() {
  static llvm::once_flag InitializeRegisterBankFlag;

  // The lambda below contains only asserts that sanity-check the statically
  // generated register-bank tables, so in release (NDEBUG) builds it is
  // effectively a no-op. It is 'static' and captures by reference, meaning the
  // TRI it observes belongs to whichever construction initialized it first;
  // this is fine here because it is only consulted inside asserts and the
  // tables being validated are subtarget-independent.
  static auto InitializeRegisterBankOnce = [&]() {
    // We have only one set of register banks, whatever the subtarget
    // is. Therefore, the initialization of the RegBanks table should be
    // done only once. Indeed the table of all register banks
    // (AArch64::RegBanks) is unique in the compiler. At some point, it
    // will get tablegen'ed and the whole constructor becomes empty.

    // Check that each bank lives at the table slot its ID claims.
    const RegisterBank &RBGPR = getRegBank(AArch64::GPRRegBankID);
    (void)RBGPR;
    assert(&AArch64::GPRRegBank == &RBGPR &&
           "The order in RegBanks is messed up");

    const RegisterBank &RBFPR = getRegBank(AArch64::FPRRegBankID);
    (void)RBFPR;
    assert(&AArch64::FPRRegBank == &RBFPR &&
           "The order in RegBanks is messed up");

    const RegisterBank &RBCCR = getRegBank(AArch64::CCRegBankID);
    (void)RBCCR;
    assert(&AArch64::CCRegBank == &RBCCR &&
           "The order in RegBanks is messed up");

    // The GPR register bank is fully defined by all the registers in
    // GR64all + its subclasses.
    assert(RBGPR.covers(*TRI.getRegClass(AArch64::GPR32RegClassID)) &&
           "Subclass not added?");
    assert(RBGPR.getSize() == 128 && "GPRs should hold up to 128-bit");

    // The FPR register bank is fully defined by all the registers in
    // GR64all + its subclasses.
    assert(RBFPR.covers(*TRI.getRegClass(AArch64::QQRegClassID)) &&
           "Subclass not added?");
    assert(RBFPR.covers(*TRI.getRegClass(AArch64::FPR64RegClassID)) &&
           "Subclass not added?");
    assert(RBFPR.getSize() == 512 &&
           "FPRs should hold up to 512-bit via QQQQ sequence");

    assert(RBCCR.covers(*TRI.getRegClass(AArch64::CCRRegClassID)) &&
           "Class not added?");
    assert(RBCCR.getSize() == 32 && "CCR should hold up to 32-bit");

    // Check that the TableGen'ed like file is in sync with our expectations.
    // First, the Idx: partial-mapping indices must be contiguous and ordered.
    assert(checkPartialMappingIdx(PMI_FirstGPR, PMI_LastGPR,
                                  {PMI_GPR32, PMI_GPR64, PMI_GPR128}) &&
           "PartialMappingIdx's are incorrectly ordered");
    assert(checkPartialMappingIdx(PMI_FirstFPR, PMI_LastFPR,
                                  {PMI_FPR16, PMI_FPR32, PMI_FPR64, PMI_FPR128,
                                   PMI_FPR256, PMI_FPR512}) &&
           "PartialMappingIdx's are incorrectly ordered");
    // Now, the content.
    // Check partial mapping: each entry covers bits [ValStartIdx,
    // ValStartIdx+ValLength) of a value on register bank RB.
#define CHECK_PARTIALMAP(Idx, ValStartIdx, ValLength, RB)                      \
  do {                                                                         \
    assert(                                                                    \
        checkPartialMap(PartialMappingIdx::Idx, ValStartIdx, ValLength, RB) && \
        #Idx " is incorrectly initialized");                                   \
  } while (false)

    CHECK_PARTIALMAP(PMI_GPR32, 0, 32, RBGPR);
    CHECK_PARTIALMAP(PMI_GPR64, 0, 64, RBGPR);
    CHECK_PARTIALMAP(PMI_GPR128, 0, 128, RBGPR);
    CHECK_PARTIALMAP(PMI_FPR16, 0, 16, RBFPR);
    CHECK_PARTIALMAP(PMI_FPR32, 0, 32, RBFPR);
    CHECK_PARTIALMAP(PMI_FPR64, 0, 64, RBFPR);
    CHECK_PARTIALMAP(PMI_FPR128, 0, 128, RBFPR);
    CHECK_PARTIALMAP(PMI_FPR256, 0, 256, RBFPR);
    CHECK_PARTIALMAP(PMI_FPR512, 0, 512, RBFPR);

    // Check value mapping: a value mapping is a (bank, size, offset) triple
    // derived from the partial-mapping table.
#define CHECK_VALUEMAP_IMPL(RBName, Size, Offset)                              \
  do {                                                                         \
    assert(checkValueMapImpl(PartialMappingIdx::PMI_##RBName##Size,            \
                             PartialMappingIdx::PMI_First##RBName, Size,       \
                             Offset) &&                                        \
           #RBName #Size " " #Offset " is incorrectly initialized");           \
  } while (false)

#define CHECK_VALUEMAP(RBName, Size) CHECK_VALUEMAP_IMPL(RBName, Size, 0)

    CHECK_VALUEMAP(GPR, 32);
    CHECK_VALUEMAP(GPR, 64);
    CHECK_VALUEMAP(GPR, 128);
    CHECK_VALUEMAP(FPR, 16);
    CHECK_VALUEMAP(FPR, 32);
    CHECK_VALUEMAP(FPR, 64);
    CHECK_VALUEMAP(FPR, 128);
    CHECK_VALUEMAP(FPR, 256);
    CHECK_VALUEMAP(FPR, 512);

    // Check the value mapping for 3-operands instructions where all the operands
    // map to the same value mapping.
#define CHECK_VALUEMAP_3OPS(RBName, Size)                                      \
  do {                                                                         \
    CHECK_VALUEMAP_IMPL(RBName, Size, 0);                                      \
    CHECK_VALUEMAP_IMPL(RBName, Size, 1);                                      \
    CHECK_VALUEMAP_IMPL(RBName, Size, 2);                                      \
  } while (false)

    CHECK_VALUEMAP_3OPS(GPR, 32);
    CHECK_VALUEMAP_3OPS(GPR, 64);
    CHECK_VALUEMAP_3OPS(GPR, 128);
    CHECK_VALUEMAP_3OPS(FPR, 32);
    CHECK_VALUEMAP_3OPS(FPR, 64);
    CHECK_VALUEMAP_3OPS(FPR, 128);
    CHECK_VALUEMAP_3OPS(FPR, 256);
    CHECK_VALUEMAP_3OPS(FPR, 512);

    // Check the copy mappings used for cross-bank (and same-bank) copies:
    // entry 0 describes the destination, entry 1 the source.
#define CHECK_VALUEMAP_CROSSREGCPY(RBNameDst, RBNameSrc, Size)                 \
  do {                                                                         \
    unsigned PartialMapDstIdx = PMI_##RBNameDst##Size - PMI_Min;               \
    unsigned PartialMapSrcIdx = PMI_##RBNameSrc##Size - PMI_Min;               \
    (void)PartialMapDstIdx;                                                    \
    (void)PartialMapSrcIdx;                                                    \
    const ValueMapping *Map = getCopyMapping(                                  \
        AArch64::RBNameDst##RegBankID, AArch64::RBNameSrc##RegBankID, Size);   \
    (void)Map;                                                                 \
    assert(Map[0].BreakDown ==                                                 \
               &AArch64GenRegisterBankInfo::PartMappings[PartialMapDstIdx] &&  \
           Map[0].NumBreakDowns == 1 && #RBNameDst #Size                       \
           " Dst is incorrectly initialized");                                 \
    assert(Map[1].BreakDown ==                                                 \
               &AArch64GenRegisterBankInfo::PartMappings[PartialMapSrcIdx] &&  \
           Map[1].NumBreakDowns == 1 && #RBNameSrc #Size                       \
           " Src is incorrectly initialized");                                 \
                                                                               \
  } while (false)

    CHECK_VALUEMAP_CROSSREGCPY(GPR, GPR, 32);
    CHECK_VALUEMAP_CROSSREGCPY(GPR, FPR, 32);
    CHECK_VALUEMAP_CROSSREGCPY(GPR, GPR, 64);
    CHECK_VALUEMAP_CROSSREGCPY(GPR, FPR, 64);
    CHECK_VALUEMAP_CROSSREGCPY(FPR, FPR, 32);
    CHECK_VALUEMAP_CROSSREGCPY(FPR, GPR, 32);
    CHECK_VALUEMAP_CROSSREGCPY(FPR, FPR, 64);
    CHECK_VALUEMAP_CROSSREGCPY(FPR, GPR, 64);

    // Check the G_FPEXT mappings (FPR -> wider FPR): entry 0 is the
    // destination size, entry 1 the source size.
#define CHECK_VALUEMAP_FPEXT(DstSize, SrcSize)                                 \
  do {                                                                         \
    unsigned PartialMapDstIdx = PMI_FPR##DstSize - PMI_Min;                    \
    unsigned PartialMapSrcIdx = PMI_FPR##SrcSize - PMI_Min;                    \
    (void)PartialMapDstIdx;                                                    \
    (void)PartialMapSrcIdx;                                                    \
    const ValueMapping *Map = getFPExtMapping(DstSize, SrcSize);               \
    (void)Map;                                                                 \
    assert(Map[0].BreakDown ==                                                 \
               &AArch64GenRegisterBankInfo::PartMappings[PartialMapDstIdx] &&  \
           Map[0].NumBreakDowns == 1 && "FPR" #DstSize                         \
           " Dst is incorrectly initialized");                                 \
    assert(Map[1].BreakDown ==                                                 \
               &AArch64GenRegisterBankInfo::PartMappings[PartialMapSrcIdx] &&  \
           Map[1].NumBreakDowns == 1 && "FPR" #SrcSize                         \
           " Src is incorrectly initialized");                                 \
                                                                               \
  } while (false)

    CHECK_VALUEMAP_FPEXT(32, 16);
    CHECK_VALUEMAP_FPEXT(64, 16);
    CHECK_VALUEMAP_FPEXT(64, 32);
    CHECK_VALUEMAP_FPEXT(128, 64);

    assert(verify(TRI) && "Invalid register bank information");
  };

  llvm::call_once(InitializeRegisterBankFlag, InitializeRegisterBankOnce);
}
214
copyCost(const RegisterBank & A,const RegisterBank & B,unsigned Size) const215 unsigned AArch64RegisterBankInfo::copyCost(const RegisterBank &A,
216 const RegisterBank &B,
217 unsigned Size) const {
218 // What do we do with different size?
219 // copy are same size.
220 // Will introduce other hooks for different size:
221 // * extract cost.
222 // * build_sequence cost.
223
224 // Copy from (resp. to) GPR to (resp. from) FPR involves FMOV.
225 // FIXME: This should be deduced from the scheduling model.
226 if (&A == &AArch64::GPRRegBank && &B == &AArch64::FPRRegBank)
227 // FMOVXDr or FMOVWSr.
228 return 5;
229 if (&A == &AArch64::FPRRegBank && &B == &AArch64::GPRRegBank)
230 // FMOVDXr or FMOVSWr.
231 return 4;
232
233 return RegisterBankInfo::copyCost(A, B, Size);
234 }
235
/// Map a register class onto the single register bank that covers it:
/// FP/vector classes (including the D/Q tuple classes) go to FPR, all
/// general-purpose classes (including the sequential-pair classes) go to
/// GPR, and the condition-code class goes to CC. Unknown classes are a
/// hard error rather than a fallback, so new classes must be added here
/// explicitly.
const RegisterBank &
AArch64RegisterBankInfo::getRegBankFromRegClass(const TargetRegisterClass &RC,
                                                LLT) const {
  switch (RC.getID()) {
  // Scalar FP, vector, and D/Q register-tuple classes.
  case AArch64::FPR8RegClassID:
  case AArch64::FPR16RegClassID:
  case AArch64::FPR16_loRegClassID:
  case AArch64::FPR32_with_hsub_in_FPR16_loRegClassID:
  case AArch64::FPR32RegClassID:
  case AArch64::FPR64RegClassID:
  case AArch64::FPR64_loRegClassID:
  case AArch64::FPR128RegClassID:
  case AArch64::FPR128_loRegClassID:
  case AArch64::DDRegClassID:
  case AArch64::DDDRegClassID:
  case AArch64::DDDDRegClassID:
  case AArch64::QQRegClassID:
  case AArch64::QQQRegClassID:
  case AArch64::QQQQRegClassID:
    return getRegBank(AArch64::FPRRegBankID);
  // General-purpose classes, including SP-capable, tail-call, and
  // sequential-pair variants.
  case AArch64::GPR32commonRegClassID:
  case AArch64::GPR32RegClassID:
  case AArch64::GPR32spRegClassID:
  case AArch64::GPR32sponlyRegClassID:
  case AArch64::GPR32argRegClassID:
  case AArch64::GPR32allRegClassID:
  case AArch64::GPR64commonRegClassID:
  case AArch64::GPR64RegClassID:
  case AArch64::GPR64spRegClassID:
  case AArch64::GPR64sponlyRegClassID:
  case AArch64::GPR64argRegClassID:
  case AArch64::GPR64allRegClassID:
  case AArch64::GPR64noipRegClassID:
  case AArch64::GPR64common_and_GPR64noipRegClassID:
  case AArch64::GPR64noip_and_tcGPR64RegClassID:
  case AArch64::tcGPR64RegClassID:
  case AArch64::rtcGPR64RegClassID:
  case AArch64::WSeqPairsClassRegClassID:
  case AArch64::XSeqPairsClassRegClassID:
  case AArch64::MatrixIndexGPR32_12_15RegClassID:
  case AArch64::GPR64_with_sub_32_in_MatrixIndexGPR32_12_15RegClassID:
    return getRegBank(AArch64::GPRRegBankID);
  // NZCV condition flags.
  case AArch64::CCRRegClassID:
    return getRegBank(AArch64::CCRegBankID);
  default:
    llvm_unreachable("Register class not supported");
  }
}
284
285 RegisterBankInfo::InstructionMappings
getInstrAlternativeMappings(const MachineInstr & MI) const286 AArch64RegisterBankInfo::getInstrAlternativeMappings(
287 const MachineInstr &MI) const {
288 const MachineFunction &MF = *MI.getParent()->getParent();
289 const TargetSubtargetInfo &STI = MF.getSubtarget();
290 const TargetRegisterInfo &TRI = *STI.getRegisterInfo();
291 const MachineRegisterInfo &MRI = MF.getRegInfo();
292
293 switch (MI.getOpcode()) {
294 case TargetOpcode::G_OR: {
295 // 32 and 64-bit or can be mapped on either FPR or
296 // GPR for the same cost.
297 unsigned Size = getSizeInBits(MI.getOperand(0).getReg(), MRI, TRI);
298 if (Size != 32 && Size != 64)
299 break;
300
301 // If the instruction has any implicit-defs or uses,
302 // do not mess with it.
303 if (MI.getNumOperands() != 3)
304 break;
305 InstructionMappings AltMappings;
306 const InstructionMapping &GPRMapping = getInstructionMapping(
307 /*ID*/ 1, /*Cost*/ 1, getValueMapping(PMI_FirstGPR, Size),
308 /*NumOperands*/ 3);
309 const InstructionMapping &FPRMapping = getInstructionMapping(
310 /*ID*/ 2, /*Cost*/ 1, getValueMapping(PMI_FirstFPR, Size),
311 /*NumOperands*/ 3);
312
313 AltMappings.push_back(&GPRMapping);
314 AltMappings.push_back(&FPRMapping);
315 return AltMappings;
316 }
317 case TargetOpcode::G_BITCAST: {
318 unsigned Size = getSizeInBits(MI.getOperand(0).getReg(), MRI, TRI);
319 if (Size != 32 && Size != 64)
320 break;
321
322 // If the instruction has any implicit-defs or uses,
323 // do not mess with it.
324 if (MI.getNumOperands() != 2)
325 break;
326
327 InstructionMappings AltMappings;
328 const InstructionMapping &GPRMapping = getInstructionMapping(
329 /*ID*/ 1, /*Cost*/ 1,
330 getCopyMapping(AArch64::GPRRegBankID, AArch64::GPRRegBankID, Size),
331 /*NumOperands*/ 2);
332 const InstructionMapping &FPRMapping = getInstructionMapping(
333 /*ID*/ 2, /*Cost*/ 1,
334 getCopyMapping(AArch64::FPRRegBankID, AArch64::FPRRegBankID, Size),
335 /*NumOperands*/ 2);
336 const InstructionMapping &GPRToFPRMapping = getInstructionMapping(
337 /*ID*/ 3,
338 /*Cost*/ copyCost(AArch64::GPRRegBank, AArch64::FPRRegBank, Size),
339 getCopyMapping(AArch64::FPRRegBankID, AArch64::GPRRegBankID, Size),
340 /*NumOperands*/ 2);
341 const InstructionMapping &FPRToGPRMapping = getInstructionMapping(
342 /*ID*/ 3,
343 /*Cost*/ copyCost(AArch64::GPRRegBank, AArch64::FPRRegBank, Size),
344 getCopyMapping(AArch64::GPRRegBankID, AArch64::FPRRegBankID, Size),
345 /*NumOperands*/ 2);
346
347 AltMappings.push_back(&GPRMapping);
348 AltMappings.push_back(&FPRMapping);
349 AltMappings.push_back(&GPRToFPRMapping);
350 AltMappings.push_back(&FPRToGPRMapping);
351 return AltMappings;
352 }
353 case TargetOpcode::G_LOAD: {
354 unsigned Size = getSizeInBits(MI.getOperand(0).getReg(), MRI, TRI);
355 if (Size != 64)
356 break;
357
358 // If the instruction has any implicit-defs or uses,
359 // do not mess with it.
360 if (MI.getNumOperands() != 2)
361 break;
362
363 InstructionMappings AltMappings;
364 const InstructionMapping &GPRMapping = getInstructionMapping(
365 /*ID*/ 1, /*Cost*/ 1,
366 getOperandsMapping({getValueMapping(PMI_FirstGPR, Size),
367 // Addresses are GPR 64-bit.
368 getValueMapping(PMI_FirstGPR, 64)}),
369 /*NumOperands*/ 2);
370 const InstructionMapping &FPRMapping = getInstructionMapping(
371 /*ID*/ 2, /*Cost*/ 1,
372 getOperandsMapping({getValueMapping(PMI_FirstFPR, Size),
373 // Addresses are GPR 64-bit.
374 getValueMapping(PMI_FirstGPR, 64)}),
375 /*NumOperands*/ 2);
376
377 AltMappings.push_back(&GPRMapping);
378 AltMappings.push_back(&FPRMapping);
379 return AltMappings;
380 }
381 default:
382 break;
383 }
384 return RegisterBankInfo::getInstrAlternativeMappings(MI);
385 }
386
applyMappingImpl(const OperandsMapper & OpdMapper) const387 void AArch64RegisterBankInfo::applyMappingImpl(
388 const OperandsMapper &OpdMapper) const {
389 switch (OpdMapper.getMI().getOpcode()) {
390 case TargetOpcode::G_OR:
391 case TargetOpcode::G_BITCAST:
392 case TargetOpcode::G_LOAD:
393 // Those ID must match getInstrAlternativeMappings.
394 assert((OpdMapper.getInstrMapping().getID() >= 1 &&
395 OpdMapper.getInstrMapping().getID() <= 4) &&
396 "Don't know how to handle that ID");
397 return applyDefaultMapping(OpdMapper);
398 default:
399 llvm_unreachable("Don't know how to handle that operation");
400 }
401 }
402
403 /// Returns whether opcode \p Opc is a pre-isel generic floating-point opcode,
404 /// having only floating-point operands.
isPreISelGenericFloatingPointOpcode(unsigned Opc)405 static bool isPreISelGenericFloatingPointOpcode(unsigned Opc) {
406 switch (Opc) {
407 case TargetOpcode::G_FADD:
408 case TargetOpcode::G_FSUB:
409 case TargetOpcode::G_FMUL:
410 case TargetOpcode::G_FMA:
411 case TargetOpcode::G_FDIV:
412 case TargetOpcode::G_FCONSTANT:
413 case TargetOpcode::G_FPEXT:
414 case TargetOpcode::G_FPTRUNC:
415 case TargetOpcode::G_FCEIL:
416 case TargetOpcode::G_FFLOOR:
417 case TargetOpcode::G_FNEARBYINT:
418 case TargetOpcode::G_FNEG:
419 case TargetOpcode::G_FCOS:
420 case TargetOpcode::G_FSIN:
421 case TargetOpcode::G_FLOG10:
422 case TargetOpcode::G_FLOG:
423 case TargetOpcode::G_FLOG2:
424 case TargetOpcode::G_FSQRT:
425 case TargetOpcode::G_FABS:
426 case TargetOpcode::G_FEXP:
427 case TargetOpcode::G_FRINT:
428 case TargetOpcode::G_INTRINSIC_TRUNC:
429 case TargetOpcode::G_INTRINSIC_ROUND:
430 return true;
431 }
432 return false;
433 }
434
/// Build a mapping where every operand of \p MI lives on the same register
/// bank (all FPR or all GPR) with the same size, chosen from the type of the
/// destination operand. Used for instructions whose operands are homogeneous,
/// e.g. two-input arithmetic/bitwise ops.
const RegisterBankInfo::InstructionMapping &
AArch64RegisterBankInfo::getSameKindOfOperandsMapping(
    const MachineInstr &MI) const {
  const unsigned Opc = MI.getOpcode();
  const MachineFunction &MF = *MI.getParent()->getParent();
  const MachineRegisterInfo &MRI = MF.getRegInfo();

  unsigned NumOperands = MI.getNumOperands();
  assert(NumOperands <= 3 &&
         "This code is for instructions with 3 or less operands");

  // The bank is picked from the destination: vectors always go to FPR, as do
  // scalars of FP-only opcodes; everything else goes to GPR.
  LLT Ty = MRI.getType(MI.getOperand(0).getReg());
  unsigned Size = Ty.getSizeInBits();
  bool IsFPR = Ty.isVector() || isPreISelGenericFloatingPointOpcode(Opc);

  PartialMappingIdx RBIdx = IsFPR ? PMI_FirstFPR : PMI_FirstGPR;

#ifndef NDEBUG
  // Make sure all the operands are using similar size and type.
  // Should probably be checked by the machine verifier.
  // This code won't catch cases where the number of lanes is
  // different between the operands.
  // If we want to go to that level of details, it is probably
  // best to check that the types are the same, period.
  // Currently, we just check that the register banks are the same
  // for each types.
  for (unsigned Idx = 1; Idx != NumOperands; ++Idx) {
    LLT OpTy = MRI.getType(MI.getOperand(Idx).getReg());
    assert(
        AArch64GenRegisterBankInfo::getRegBankBaseIdxOffset(
            RBIdx, OpTy.getSizeInBits()) ==
            AArch64GenRegisterBankInfo::getRegBankBaseIdxOffset(RBIdx, Size) &&
        "Operand has incompatible size");
    bool OpIsFPR = OpTy.isVector() || isPreISelGenericFloatingPointOpcode(Opc);
    (void)OpIsFPR;
    assert(IsFPR == OpIsFPR && "Operand has incompatible type");
  }
#endif // End NDEBUG.

  return getInstructionMapping(DefaultMappingID, 1,
                               getValueMapping(RBIdx, Size), NumOperands);
}
477
478 /// \returns true if a given intrinsic \p ID only uses and defines FPRs.
isFPIntrinsic(unsigned ID)479 static bool isFPIntrinsic(unsigned ID) {
480 // TODO: Add more intrinsics.
481 switch (ID) {
482 default:
483 return false;
484 case Intrinsic::aarch64_neon_uaddlv:
485 return true;
486 }
487 }
488
// Decide whether \p MI is constrained to the FPR bank: either it is an
// explicitly floating-point instruction (or FP intrinsic), or it is a
// copy-like instruction whose destination is already known to be (or can be
// inferred, through PHIs, to be) on FPR. \p Depth bounds the recursive PHI
// walk.
bool AArch64RegisterBankInfo::hasFPConstraints(const MachineInstr &MI,
                                               const MachineRegisterInfo &MRI,
                                               const TargetRegisterInfo &TRI,
                                               unsigned Depth) const {
  unsigned Op = MI.getOpcode();
  if (Op == TargetOpcode::G_INTRINSIC && isFPIntrinsic(MI.getIntrinsicID()))
    return true;

  // Do we have an explicit floating point instruction?
  if (isPreISelGenericFloatingPointOpcode(Op))
    return true;

  // No. Check if we have a copy-like instruction. If we do, then we could
  // still be fed by floating point instructions.
  if (Op != TargetOpcode::COPY && !MI.isPHI() &&
      !isPreISelGenericOptimizationHint(Op))
    return false;

  // Check if we already know the register bank.
  auto *RB = getRegBank(MI.getOperand(0).getReg(), MRI, TRI);
  if (RB == &AArch64::FPRRegBank)
    return true;
  if (RB == &AArch64::GPRRegBank)
    return false;

  // We don't know anything.
  //
  // If we have a phi, we may be able to infer that it will be assigned a FPR
  // based off of its inputs.
  if (!MI.isPHI() || Depth > MaxFPRSearchDepth)
    return false;

  // A PHI is FP-constrained if any of its register inputs is produced by an
  // FP-defining instruction (recursing with an incremented depth).
  return any_of(MI.explicit_uses(), [&](const MachineOperand &Op) {
    return Op.isReg() &&
           onlyDefinesFP(*MRI.getVRegDef(Op.getReg()), MRI, TRI, Depth + 1);
  });
}
526
onlyUsesFP(const MachineInstr & MI,const MachineRegisterInfo & MRI,const TargetRegisterInfo & TRI,unsigned Depth) const527 bool AArch64RegisterBankInfo::onlyUsesFP(const MachineInstr &MI,
528 const MachineRegisterInfo &MRI,
529 const TargetRegisterInfo &TRI,
530 unsigned Depth) const {
531 switch (MI.getOpcode()) {
532 case TargetOpcode::G_FPTOSI:
533 case TargetOpcode::G_FPTOUI:
534 case TargetOpcode::G_FCMP:
535 return true;
536 default:
537 break;
538 }
539 return hasFPConstraints(MI, MRI, TRI, Depth);
540 }
541
onlyDefinesFP(const MachineInstr & MI,const MachineRegisterInfo & MRI,const TargetRegisterInfo & TRI,unsigned Depth) const542 bool AArch64RegisterBankInfo::onlyDefinesFP(const MachineInstr &MI,
543 const MachineRegisterInfo &MRI,
544 const TargetRegisterInfo &TRI,
545 unsigned Depth) const {
546 switch (MI.getOpcode()) {
547 case AArch64::G_DUP:
548 case TargetOpcode::G_SITOFP:
549 case TargetOpcode::G_UITOFP:
550 case TargetOpcode::G_EXTRACT_VECTOR_ELT:
551 case TargetOpcode::G_INSERT_VECTOR_ELT:
552 case TargetOpcode::G_BUILD_VECTOR:
553 case TargetOpcode::G_BUILD_VECTOR_TRUNC:
554 return true;
555 default:
556 break;
557 }
558 return hasFPConstraints(MI, MRI, TRI, Depth);
559 }
560
561 const RegisterBankInfo::InstructionMapping &
getInstrMapping(const MachineInstr & MI) const562 AArch64RegisterBankInfo::getInstrMapping(const MachineInstr &MI) const {
563 const unsigned Opc = MI.getOpcode();
564
565 // Try the default logic for non-generic instructions that are either copies
566 // or already have some operands assigned to banks.
567 if ((Opc != TargetOpcode::COPY && !isPreISelGenericOpcode(Opc)) ||
568 Opc == TargetOpcode::G_PHI) {
569 const RegisterBankInfo::InstructionMapping &Mapping =
570 getInstrMappingImpl(MI);
571 if (Mapping.isValid())
572 return Mapping;
573 }
574
575 const MachineFunction &MF = *MI.getParent()->getParent();
576 const MachineRegisterInfo &MRI = MF.getRegInfo();
577 const TargetSubtargetInfo &STI = MF.getSubtarget();
578 const TargetRegisterInfo &TRI = *STI.getRegisterInfo();
579
580 switch (Opc) {
581 // G_{F|S|U}REM are not listed because they are not legal.
582 // Arithmetic ops.
583 case TargetOpcode::G_ADD:
584 case TargetOpcode::G_SUB:
585 case TargetOpcode::G_PTR_ADD:
586 case TargetOpcode::G_MUL:
587 case TargetOpcode::G_SDIV:
588 case TargetOpcode::G_UDIV:
589 // Bitwise ops.
590 case TargetOpcode::G_AND:
591 case TargetOpcode::G_OR:
592 case TargetOpcode::G_XOR:
593 // Floating point ops.
594 case TargetOpcode::G_FADD:
595 case TargetOpcode::G_FSUB:
596 case TargetOpcode::G_FMUL:
597 case TargetOpcode::G_FDIV:
598 return getSameKindOfOperandsMapping(MI);
599 case TargetOpcode::G_FPEXT: {
600 LLT DstTy = MRI.getType(MI.getOperand(0).getReg());
601 LLT SrcTy = MRI.getType(MI.getOperand(1).getReg());
602 return getInstructionMapping(
603 DefaultMappingID, /*Cost*/ 1,
604 getFPExtMapping(DstTy.getSizeInBits(), SrcTy.getSizeInBits()),
605 /*NumOperands*/ 2);
606 }
607 // Shifts.
608 case TargetOpcode::G_SHL:
609 case TargetOpcode::G_LSHR:
610 case TargetOpcode::G_ASHR: {
611 LLT ShiftAmtTy = MRI.getType(MI.getOperand(2).getReg());
612 LLT SrcTy = MRI.getType(MI.getOperand(1).getReg());
613 if (ShiftAmtTy.getSizeInBits() == 64 && SrcTy.getSizeInBits() == 32)
614 return getInstructionMapping(DefaultMappingID, 1,
615 &ValMappings[Shift64Imm], 3);
616 return getSameKindOfOperandsMapping(MI);
617 }
618 case TargetOpcode::COPY: {
619 Register DstReg = MI.getOperand(0).getReg();
620 Register SrcReg = MI.getOperand(1).getReg();
621 // Check if one of the register is not a generic register.
622 if ((Register::isPhysicalRegister(DstReg) ||
623 !MRI.getType(DstReg).isValid()) ||
624 (Register::isPhysicalRegister(SrcReg) ||
625 !MRI.getType(SrcReg).isValid())) {
626 const RegisterBank *DstRB = getRegBank(DstReg, MRI, TRI);
627 const RegisterBank *SrcRB = getRegBank(SrcReg, MRI, TRI);
628 if (!DstRB)
629 DstRB = SrcRB;
630 else if (!SrcRB)
631 SrcRB = DstRB;
632 // If both RB are null that means both registers are generic.
633 // We shouldn't be here.
634 assert(DstRB && SrcRB && "Both RegBank were nullptr");
635 unsigned Size = getSizeInBits(DstReg, MRI, TRI);
636 return getInstructionMapping(
637 DefaultMappingID, copyCost(*DstRB, *SrcRB, Size),
638 getCopyMapping(DstRB->getID(), SrcRB->getID(), Size),
639 // We only care about the mapping of the destination.
640 /*NumOperands*/ 1);
641 }
642 // Both registers are generic, use G_BITCAST.
643 LLVM_FALLTHROUGH;
644 }
645 case TargetOpcode::G_BITCAST: {
646 LLT DstTy = MRI.getType(MI.getOperand(0).getReg());
647 LLT SrcTy = MRI.getType(MI.getOperand(1).getReg());
648 unsigned Size = DstTy.getSizeInBits();
649 bool DstIsGPR = !DstTy.isVector() && DstTy.getSizeInBits() <= 64;
650 bool SrcIsGPR = !SrcTy.isVector() && SrcTy.getSizeInBits() <= 64;
651 const RegisterBank &DstRB =
652 DstIsGPR ? AArch64::GPRRegBank : AArch64::FPRRegBank;
653 const RegisterBank &SrcRB =
654 SrcIsGPR ? AArch64::GPRRegBank : AArch64::FPRRegBank;
655 return getInstructionMapping(
656 DefaultMappingID, copyCost(DstRB, SrcRB, Size),
657 getCopyMapping(DstRB.getID(), SrcRB.getID(), Size),
658 // We only care about the mapping of the destination for COPY.
659 /*NumOperands*/ Opc == TargetOpcode::G_BITCAST ? 2 : 1);
660 }
661 default:
662 break;
663 }
664
665 unsigned NumOperands = MI.getNumOperands();
666
667 // Track the size and bank of each register. We don't do partial mappings.
668 SmallVector<unsigned, 4> OpSize(NumOperands);
669 SmallVector<PartialMappingIdx, 4> OpRegBankIdx(NumOperands);
670 for (unsigned Idx = 0; Idx < NumOperands; ++Idx) {
671 auto &MO = MI.getOperand(Idx);
672 if (!MO.isReg() || !MO.getReg())
673 continue;
674
675 LLT Ty = MRI.getType(MO.getReg());
676 OpSize[Idx] = Ty.getSizeInBits();
677
678 // As a top-level guess, vectors go in FPRs, scalars and pointers in GPRs.
679 // For floating-point instructions, scalars go in FPRs.
680 if (Ty.isVector() || isPreISelGenericFloatingPointOpcode(Opc) ||
681 Ty.getSizeInBits() > 64)
682 OpRegBankIdx[Idx] = PMI_FirstFPR;
683 else
684 OpRegBankIdx[Idx] = PMI_FirstGPR;
685 }
686
687 unsigned Cost = 1;
688 // Some of the floating-point instructions have mixed GPR and FPR operands:
689 // fine-tune the computed mapping.
690 switch (Opc) {
691 case AArch64::G_DUP: {
692 Register ScalarReg = MI.getOperand(1).getReg();
693 LLT ScalarTy = MRI.getType(ScalarReg);
694 auto ScalarDef = MRI.getVRegDef(ScalarReg);
695 // s8 is an exception for G_DUP, which we always want on gpr.
696 if (ScalarTy.getSizeInBits() != 8 &&
697 (getRegBank(ScalarReg, MRI, TRI) == &AArch64::FPRRegBank ||
698 onlyDefinesFP(*ScalarDef, MRI, TRI)))
699 OpRegBankIdx = {PMI_FirstFPR, PMI_FirstFPR};
700 else
701 OpRegBankIdx = {PMI_FirstFPR, PMI_FirstGPR};
702 break;
703 }
704 case TargetOpcode::G_TRUNC: {
705 LLT SrcTy = MRI.getType(MI.getOperand(1).getReg());
706 if (!SrcTy.isVector() && SrcTy.getSizeInBits() == 128)
707 OpRegBankIdx = {PMI_FirstFPR, PMI_FirstFPR};
708 break;
709 }
710 case TargetOpcode::G_SITOFP:
711 case TargetOpcode::G_UITOFP: {
712 if (MRI.getType(MI.getOperand(0).getReg()).isVector())
713 break;
714 // Integer to FP conversions don't necessarily happen between GPR -> FPR
715 // regbanks. They can also be done within an FPR register.
716 Register SrcReg = MI.getOperand(1).getReg();
717 if (getRegBank(SrcReg, MRI, TRI) == &AArch64::FPRRegBank)
718 OpRegBankIdx = {PMI_FirstFPR, PMI_FirstFPR};
719 else
720 OpRegBankIdx = {PMI_FirstFPR, PMI_FirstGPR};
721 break;
722 }
723 case TargetOpcode::G_FPTOSI:
724 case TargetOpcode::G_FPTOUI:
725 if (MRI.getType(MI.getOperand(0).getReg()).isVector())
726 break;
727 OpRegBankIdx = {PMI_FirstGPR, PMI_FirstFPR};
728 break;
729 case TargetOpcode::G_FCMP: {
730 // If the result is a vector, it must use a FPR.
731 AArch64GenRegisterBankInfo::PartialMappingIdx Idx0 =
732 MRI.getType(MI.getOperand(0).getReg()).isVector() ? PMI_FirstFPR
733 : PMI_FirstGPR;
734 OpRegBankIdx = {Idx0,
735 /* Predicate */ PMI_None, PMI_FirstFPR, PMI_FirstFPR};
736 break;
737 }
738 case TargetOpcode::G_BITCAST:
739 // This is going to be a cross register bank copy and this is expensive.
740 if (OpRegBankIdx[0] != OpRegBankIdx[1])
741 Cost = copyCost(
742 *AArch64GenRegisterBankInfo::PartMappings[OpRegBankIdx[0]].RegBank,
743 *AArch64GenRegisterBankInfo::PartMappings[OpRegBankIdx[1]].RegBank,
744 OpSize[0]);
745 break;
746 case TargetOpcode::G_LOAD:
747 // Loading in vector unit is slightly more expensive.
748 // This is actually only true for the LD1R and co instructions,
749 // but anyway for the fast mode this number does not matter and
750 // for the greedy mode the cost of the cross bank copy will
751 // offset this number.
752 // FIXME: Should be derived from the scheduling model.
753 if (OpRegBankIdx[0] != PMI_FirstGPR)
754 Cost = 2;
755 else
756 // Check if that load feeds fp instructions.
757 // In that case, we want the default mapping to be on FPR
758 // instead of blind map every scalar to GPR.
759 for (const MachineInstr &UseMI :
760 MRI.use_nodbg_instructions(MI.getOperand(0).getReg())) {
761 // If we have at least one direct use in a FP instruction,
762 // assume this was a floating point load in the IR.
763 // If it was not, we would have had a bitcast before
764 // reaching that instruction.
765 // Int->FP conversion operations are also captured in onlyDefinesFP().
766 if (onlyUsesFP(UseMI, MRI, TRI) || onlyDefinesFP(UseMI, MRI, TRI)) {
767 OpRegBankIdx[0] = PMI_FirstFPR;
768 break;
769 }
770 }
771 break;
772 case TargetOpcode::G_STORE:
773 // Check if that store is fed by fp instructions.
774 if (OpRegBankIdx[0] == PMI_FirstGPR) {
775 Register VReg = MI.getOperand(0).getReg();
776 if (!VReg)
777 break;
778 MachineInstr *DefMI = MRI.getVRegDef(VReg);
779 if (onlyDefinesFP(*DefMI, MRI, TRI))
780 OpRegBankIdx[0] = PMI_FirstFPR;
781 break;
782 }
783 break;
784 case TargetOpcode::G_SELECT: {
785 // If the destination is FPR, preserve that.
786 if (OpRegBankIdx[0] != PMI_FirstGPR)
787 break;
788
789 // If we're taking in vectors, we have no choice but to put everything on
790 // FPRs, except for the condition. The condition must always be on a GPR.
791 LLT SrcTy = MRI.getType(MI.getOperand(2).getReg());
792 if (SrcTy.isVector()) {
793 OpRegBankIdx = {PMI_FirstFPR, PMI_FirstGPR, PMI_FirstFPR, PMI_FirstFPR};
794 break;
795 }
796
797 // Try to minimize the number of copies. If we have more floating point
798 // constrained values than not, then we'll put everything on FPR. Otherwise,
799 // everything has to be on GPR.
800 unsigned NumFP = 0;
801
802 // Check if the uses of the result always produce floating point values.
803 //
804 // For example:
805 //
806 // %z = G_SELECT %cond %x %y
807 // fpr = G_FOO %z ...
808 if (any_of(MRI.use_nodbg_instructions(MI.getOperand(0).getReg()),
809 [&](MachineInstr &MI) { return onlyUsesFP(MI, MRI, TRI); }))
810 ++NumFP;
811
812 // Check if the defs of the source values always produce floating point
813 // values.
814 //
815 // For example:
816 //
817 // %x = G_SOMETHING_ALWAYS_FLOAT %a ...
818 // %z = G_SELECT %cond %x %y
819 //
820 // Also check whether or not the sources have already been decided to be
821 // FPR. Keep track of this.
822 //
823 // This doesn't check the condition, since it's just whatever is in NZCV.
824 // This isn't passed explicitly in a register to fcsel/csel.
825 for (unsigned Idx = 2; Idx < 4; ++Idx) {
826 Register VReg = MI.getOperand(Idx).getReg();
827 MachineInstr *DefMI = MRI.getVRegDef(VReg);
828 if (getRegBank(VReg, MRI, TRI) == &AArch64::FPRRegBank ||
829 onlyDefinesFP(*DefMI, MRI, TRI))
830 ++NumFP;
831 }
832
833 // If we have more FP constraints than not, then move everything over to
834 // FPR.
835 if (NumFP >= 2)
836 OpRegBankIdx = {PMI_FirstFPR, PMI_FirstGPR, PMI_FirstFPR, PMI_FirstFPR};
837
838 break;
839 }
  case TargetOpcode::G_UNMERGE_VALUES: {
    // If the first operand belongs to a FPR register bank, then make sure that
    // we preserve that.
    if (OpRegBankIdx[0] != PMI_FirstGPR)
      break;

    // The source of an unmerge is its last operand; all earlier operands are
    // the destinations.
    LLT SrcTy = MRI.getType(MI.getOperand(MI.getNumOperands()-1).getReg());
    // UNMERGE into scalars from a vector should always use FPR.
    // Likewise if any of the uses are FP instructions.
    // s128 is treated like a vector here since it is also assumed to live in
    // the FPR bank — TODO confirm this matches how s128 is mapped elsewhere.
    if (SrcTy.isVector() || SrcTy == LLT::scalar(128) ||
        any_of(MRI.use_nodbg_instructions(MI.getOperand(0).getReg()),
               [&](MachineInstr &MI) { return onlyUsesFP(MI, MRI, TRI); })) {
      // Set the register bank of every operand to FPR.
      for (unsigned Idx = 0, NumOperands = MI.getNumOperands();
           Idx < NumOperands; ++Idx)
        OpRegBankIdx[Idx] = PMI_FirstFPR;
    }
    break;
  }
  case TargetOpcode::G_EXTRACT_VECTOR_ELT:
    // Destination and source (the vector) need to be FPRs.
    OpRegBankIdx[0] = PMI_FirstFPR;
    OpRegBankIdx[1] = PMI_FirstFPR;

    // Index needs to be a GPR.
    OpRegBankIdx[2] = PMI_FirstGPR;
    break;
  case TargetOpcode::G_INSERT_VECTOR_ELT:
    // Destination and source vector are always FPR.
    OpRegBankIdx[0] = PMI_FirstFPR;
    OpRegBankIdx[1] = PMI_FirstFPR;

    // The element may be either a GPR or FPR. Preserve that behaviour.
    if (getRegBank(MI.getOperand(2).getReg(), MRI, TRI) == &AArch64::FPRRegBank)
      OpRegBankIdx[2] = PMI_FirstFPR;
    else
      OpRegBankIdx[2] = PMI_FirstGPR;

    // Index needs to be a GPR.
    OpRegBankIdx[3] = PMI_FirstGPR;
    break;
  case TargetOpcode::G_EXTRACT: {
    // For s128 sources we have to use fpr unless we know otherwise.
    auto Src = MI.getOperand(1).getReg();
    LLT SrcTy = MRI.getType(MI.getOperand(1).getReg());
    if (SrcTy.getSizeInBits() != 128)
      break;
    // A 128-bit value already constrained to the XSeqPairs class lives on
    // GPRs (a consecutive X-register pair — presumably from 128-bit atomics;
    // verify against the producers of such values). Otherwise default to FPR.
    auto Idx = MRI.getRegClassOrNull(Src) == &AArch64::XSeqPairsClassRegClass
                   ? PMI_FirstGPR
                   : PMI_FirstFPR;
    // Map destination and source to the same bank to avoid cross-bank copies.
    OpRegBankIdx[0] = Idx;
    OpRegBankIdx[1] = Idx;
    break;
  }
  case TargetOpcode::G_BUILD_VECTOR: {
    // If the first source operand belongs to a FPR register bank, then make
    // sure that we preserve that.
    if (OpRegBankIdx[1] != PMI_FirstGPR)
      break;
    Register VReg = MI.getOperand(1).getReg();
    if (!VReg)
      break;

    // Get the instruction that defined the source operand reg, and check if
    // it's a floating point operation. Or, if it's a type like s16 which
    // doesn't have a exact size gpr register class. The exception is if the
    // build_vector has all constant operands, which may be better to leave as
    // gpr without copies, so it can be matched in imported patterns.
    MachineInstr *DefMI = MRI.getVRegDef(VReg);
    unsigned DefOpc = DefMI->getOpcode();
    const LLT SrcTy = MRI.getType(VReg);
    // All-G_CONSTANT sources: keep the default (GPR) mapping.
    if (all_of(MI.operands(), [&](const MachineOperand &Op) {
          return Op.isDef() || MRI.getVRegDef(Op.getReg())->getOpcode() ==
                                   TargetOpcode::G_CONSTANT;
        }))
      break;
    // FPR if: the first source is FP-defined, its type is narrower than 32
    // bits (no exact-size GPR class), or it is already assigned to FPR.
    if (isPreISelGenericFloatingPointOpcode(DefOpc) ||
        SrcTy.getSizeInBits() < 32 ||
        getRegBank(VReg, MRI, TRI) == &AArch64::FPRRegBank) {
      // Have a floating point op.
      // Make sure every operand gets mapped to a FPR register class.
      unsigned NumOperands = MI.getNumOperands();
      for (unsigned Idx = 0; Idx < NumOperands; ++Idx)
        OpRegBankIdx[Idx] = PMI_FirstFPR;
    }
    break;
  }
  case TargetOpcode::G_VECREDUCE_FADD:
  case TargetOpcode::G_VECREDUCE_FMUL:
  case TargetOpcode::G_VECREDUCE_FMAX:
  case TargetOpcode::G_VECREDUCE_FMIN:
  case TargetOpcode::G_VECREDUCE_ADD:
  case TargetOpcode::G_VECREDUCE_MUL:
  case TargetOpcode::G_VECREDUCE_AND:
  case TargetOpcode::G_VECREDUCE_OR:
  case TargetOpcode::G_VECREDUCE_XOR:
  case TargetOpcode::G_VECREDUCE_SMAX:
  case TargetOpcode::G_VECREDUCE_SMIN:
  case TargetOpcode::G_VECREDUCE_UMAX:
  case TargetOpcode::G_VECREDUCE_UMIN:
    // Reductions produce a scalar value from a vector, the scalar should be on
    // FPR bank. (Operands: [0] = scalar result, [1] = vector source.)
    OpRegBankIdx = {PMI_FirstFPR, PMI_FirstFPR};
    break;
  case TargetOpcode::G_VECREDUCE_SEQ_FADD:
  case TargetOpcode::G_VECREDUCE_SEQ_FMUL:
    // These reductions also take a scalar accumulator input.
    // Assign them FPR for now.
    // (Operands: [0] = result, [1] = accumulator, [2] = vector source.)
    OpRegBankIdx = {PMI_FirstFPR, PMI_FirstFPR, PMI_FirstFPR};
    break;
  case TargetOpcode::G_INTRINSIC: {
    // Check if we know that the intrinsic has any constraints on its register
    // banks. If it does, then update the mapping accordingly.
    unsigned ID = MI.getIntrinsicID();
    unsigned Idx = 0;
    // Only FP intrinsics are constrained here; everything else keeps the
    // default mapping computed earlier.
    if (!isFPIntrinsic(ID))
      break;
    // Force every register operand (results and arguments alike) onto FPR.
    // Idx is advanced for non-register operands too, so it stays in sync
    // with the operand index used by OpRegBankIdx.
    for (const auto &Op : MI.explicit_operands()) {
      if (Op.isReg())
        OpRegBankIdx[Idx] = PMI_FirstFPR;
      ++Idx;
    }
    break;
  }
  }

  // Finally construct the computed mapping.
  SmallVector<const ValueMapping *, 8> OpdsMapping(NumOperands);
  for (unsigned Idx = 0; Idx < NumOperands; ++Idx) {
    // Only register operands get a value mapping; non-register operands
    // (immediates, etc.) keep the default-initialized (null) entry.
    if (MI.getOperand(Idx).isReg() && MI.getOperand(Idx).getReg()) {
      auto Mapping = getValueMapping(OpRegBankIdx[Idx], OpSize[Idx]);
      // Bail out entirely if any operand has no valid mapping for its
      // (bank, size) combination.
      if (!Mapping->isValid())
        return getInvalidInstructionMapping();

      OpdsMapping[Idx] = Mapping;
    }
  }

  return getInstructionMapping(DefaultMappingID, Cost,
                               getOperandsMapping(OpdsMapping), NumOperands);
}
980