//===- AArch64RegisterInfo.cpp - AArch64 Register Information -------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file contains the AArch64 implementation of the TargetRegisterInfo
// class.
//
//===----------------------------------------------------------------------===//

#include "AArch64RegisterInfo.h"
#include "AArch64FrameLowering.h"
#include "AArch64InstrInfo.h"
#include "AArch64MachineFunctionInfo.h"
#include "AArch64Subtarget.h"
#include "MCTargetDesc/AArch64AddressingModes.h"
#include "llvm/ADT/BitVector.h"
#include "llvm/ADT/Triple.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/RegisterScavenging.h"
#include "llvm/CodeGen/TargetFrameLowering.h"
#include "llvm/IR/DebugInfoMetadata.h"
#include "llvm/IR/DiagnosticInfo.h"
#include "llvm/IR/Function.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetOptions.h"

using namespace llvm;

#define GET_REGINFO_TARGET_DESC
#include "AArch64GenRegisterInfo.inc"

AArch64RegisterInfo::AArch64RegisterInfo(const Triple &TT)
    : AArch64GenRegisterInfo(AArch64::LR), TT(TT) {
  AArch64_MC::initLLVMToCVRegMapping(this);
}

/// Return whether the register needs a CFI entry. Not all unwinders may know
/// about SVE registers, so we assume the lowest common denominator, i.e. the
/// callee-saves required by the base ABI. For the SVE registers z8-z15 only
/// the lower 64 bits (d8-d15) need to be saved. The lower 64-bit subreg is
/// returned in \p RegToUseForCFI.
bool AArch64RegisterInfo::regNeedsCFI(unsigned Reg,
                                      unsigned &RegToUseForCFI) const {
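  // SVE predicate registers have no counterpart in the base ABI's
  // callee-saved list, so they never get a CFI entry of their own.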
  if (AArch64::PPRRegClass.contains(Reg))
    return false;

  if (AArch64::ZPRRegClass.contains(Reg)) {
    RegToUseForCFI = getSubReg(Reg, AArch64::dsub);
    for (int I = 0; CSR_AArch64_AAPCS_SaveList[I]; ++I) {
      if (CSR_AArch64_AAPCS_SaveList[I] == RegToUseForCFI)
        return true;
    }
    return false;
  }

  RegToUseForCFI = Reg;
  return true;
}

bool AArch64RegisterInfo::hasSVEArgsOrReturn(const MachineFunction *MF) {
  const Function &F = MF->getFunction();
  return isa<ScalableVectorType>(F.getReturnType()) ||
         any_of(F.args(), [](const Argument &Arg) {
           return isa<ScalableVectorType>(Arg.getType());
         });
}

const MCPhysReg *
AArch64RegisterInfo::getCalleeSavedRegs(const MachineFunction *MF) const {
  assert(MF && "Invalid MachineFunction pointer.");

  if (MF->getFunction().getCallingConv() == CallingConv::GHC)
    // The GHC calling convention has no callee-saved registers; they are all
    // used for passing STG registers around.
    return CSR_AArch64_NoRegs_SaveList;
  if (MF->getFunction().getCallingConv() == CallingConv::AnyReg)
    return CSR_AArch64_AllRegs_SaveList;

  // Darwin has its own CSR_Darwin_AArch64_AAPCS_SaveList, which means most CSR
  // save lists depending on it also need a Darwin variant.
  if (MF->getSubtarget<AArch64Subtarget>().isTargetDarwin())
    return getDarwinCalleeSavedRegs(MF);

  if (MF->getFunction().getCallingConv() == CallingConv::CFGuard_Check)
    return CSR_Win_AArch64_CFGuard_Check_SaveList;
  if (MF->getSubtarget<AArch64Subtarget>().isTargetWindows())
    return CSR_Win_AArch64_AAPCS_SaveList;
  if (MF->getFunction().getCallingConv() == CallingConv::AArch64_VectorCall)
    return CSR_AArch64_AAVPCS_SaveList;
  if (MF->getFunction().getCallingConv() == CallingConv::AArch64_SVE_VectorCall)
    return CSR_AArch64_SVE_AAPCS_SaveList;
  if (MF->getSubtarget<AArch64Subtarget>().getTargetLowering()
          ->supportSwiftError() &&
      MF->getFunction().getAttributes().hasAttrSomewhere(
          Attribute::SwiftError))
    return CSR_AArch64_AAPCS_SwiftError_SaveList;
  if (MF->getFunction().getCallingConv() == CallingConv::PreserveMost)
    return CSR_AArch64_RT_MostRegs_SaveList;
  if (MF->getFunction().getCallingConv() == CallingConv::Win64)
    // This is for OSes other than Windows; Windows is a separate case further
    // above.
    return CSR_AArch64_AAPCS_X18_SaveList;
  if (hasSVEArgsOrReturn(MF))
    return CSR_AArch64_SVE_AAPCS_SaveList;
  return CSR_AArch64_AAPCS_SaveList;
}

const MCPhysReg *
AArch64RegisterInfo::getDarwinCalleeSavedRegs(const MachineFunction *MF) const {
  assert(MF && "Invalid MachineFunction pointer.");
  assert(MF->getSubtarget<AArch64Subtarget>().isTargetDarwin() &&
         "Invalid subtarget for getDarwinCalleeSavedRegs");

  if (MF->getFunction().getCallingConv() == CallingConv::CFGuard_Check)
    report_fatal_error(
        "Calling convention CFGuard_Check is unsupported on Darwin.");
  if (MF->getFunction().getCallingConv() == CallingConv::AArch64_VectorCall)
    return CSR_Darwin_AArch64_AAVPCS_SaveList;
  if (MF->getFunction().getCallingConv() == CallingConv::AArch64_SVE_VectorCall)
    report_fatal_error(
        "Calling convention SVE_VectorCall is unsupported on Darwin.");
  if (MF->getFunction().getCallingConv() == CallingConv::CXX_FAST_TLS)
    return MF->getInfo<AArch64FunctionInfo>()->isSplitCSR()
               ? CSR_Darwin_AArch64_CXX_TLS_PE_SaveList
               : CSR_Darwin_AArch64_CXX_TLS_SaveList;
  if (MF->getSubtarget<AArch64Subtarget>().getTargetLowering()
          ->supportSwiftError() &&
      MF->getFunction().getAttributes().hasAttrSomewhere(
          Attribute::SwiftError))
    return CSR_Darwin_AArch64_AAPCS_SwiftError_SaveList;
  if (MF->getFunction().getCallingConv() == CallingConv::PreserveMost)
    return CSR_Darwin_AArch64_RT_MostRegs_SaveList;
  return CSR_Darwin_AArch64_AAPCS_SaveList;
}

const MCPhysReg *AArch64RegisterInfo::getCalleeSavedRegsViaCopy(
    const MachineFunction *MF) const {
  assert(MF && "Invalid MachineFunction pointer.");
  if (MF->getFunction().getCallingConv() == CallingConv::CXX_FAST_TLS &&
      MF->getInfo<AArch64FunctionInfo>()->isSplitCSR())
    return CSR_Darwin_AArch64_CXX_TLS_ViaCopy_SaveList;
  return nullptr;
}

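// Append any X registers the subtarget marks as custom callee-saved (e.g. via
// -fcall-saved-xN style options) to this function's save list.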
void AArch64RegisterInfo::UpdateCustomCalleeSavedRegs(
    MachineFunction &MF) const {
  const MCPhysReg *CSRs = getCalleeSavedRegs(&MF);
  SmallVector<MCPhysReg, 32> UpdatedCSRs;
  for (const MCPhysReg *I = CSRs; *I; ++I)
    UpdatedCSRs.push_back(*I);

  for (size_t i = 0; i < AArch64::GPR64commonRegClass.getNumRegs(); ++i) {
    if (MF.getSubtarget<AArch64Subtarget>().isXRegCustomCalleeSaved(i)) {
      UpdatedCSRs.push_back(AArch64::GPR64commonRegClass.getRegister(i));
    }
  }
  // Register lists are zero-terminated.
  UpdatedCSRs.push_back(0);
  MF.getRegInfo().setCalleeSavedRegs(UpdatedCSRs);
}

const TargetRegisterClass *
AArch64RegisterInfo::getSubClassWithSubReg(const TargetRegisterClass *RC,
                                           unsigned Idx) const {
  // Edge cases for the GPR/FPR register classes.
  if (RC == &AArch64::GPR32allRegClass && Idx == AArch64::hsub)
    return &AArch64::FPR32RegClass;
  if (RC == &AArch64::GPR64allRegClass && Idx == AArch64::hsub)
    return &AArch64::FPR64RegClass;

  // Forward to TableGen's default version.
  return AArch64GenRegisterInfo::getSubClassWithSubReg(RC, Idx);
}

const uint32_t *
AArch64RegisterInfo::getDarwinCallPreservedMask(const MachineFunction &MF,
                                                CallingConv::ID CC) const {
  assert(MF.getSubtarget<AArch64Subtarget>().isTargetDarwin() &&
         "Invalid subtarget for getDarwinCallPreservedMask");

  if (CC == CallingConv::CXX_FAST_TLS)
    return CSR_Darwin_AArch64_CXX_TLS_RegMask;
  if (CC == CallingConv::AArch64_VectorCall)
    return CSR_Darwin_AArch64_AAVPCS_RegMask;
  if (CC == CallingConv::AArch64_SVE_VectorCall)
    report_fatal_error(
        "Calling convention SVE_VectorCall is unsupported on Darwin.");
  if (CC == CallingConv::CFGuard_Check)
    report_fatal_error(
        "Calling convention CFGuard_Check is unsupported on Darwin.");
  if (MF.getSubtarget<AArch64Subtarget>()
          .getTargetLowering()
          ->supportSwiftError() &&
      MF.getFunction().getAttributes().hasAttrSomewhere(Attribute::SwiftError))
    return CSR_Darwin_AArch64_AAPCS_SwiftError_RegMask;
  if (CC == CallingConv::PreserveMost)
    return CSR_Darwin_AArch64_RT_MostRegs_RegMask;
  return CSR_Darwin_AArch64_AAPCS_RegMask;
}

const uint32_t *
AArch64RegisterInfo::getCallPreservedMask(const MachineFunction &MF,
                                          CallingConv::ID CC) const {
  bool SCS = MF.getFunction().hasFnAttribute(Attribute::ShadowCallStack);
  if (CC == CallingConv::GHC)
    // This is academic because all GHC calls are (supposed to be) tail calls.
    return SCS ? CSR_AArch64_NoRegs_SCS_RegMask : CSR_AArch64_NoRegs_RegMask;
  if (CC == CallingConv::AnyReg)
    return SCS ? CSR_AArch64_AllRegs_SCS_RegMask : CSR_AArch64_AllRegs_RegMask;

  // All the following calling conventions are handled differently on Darwin.
  if (MF.getSubtarget<AArch64Subtarget>().isTargetDarwin()) {
    if (SCS)
      report_fatal_error("ShadowCallStack attribute not supported on Darwin.");
    return getDarwinCallPreservedMask(MF, CC);
  }

  if (CC == CallingConv::AArch64_VectorCall)
    return SCS ? CSR_AArch64_AAVPCS_SCS_RegMask : CSR_AArch64_AAVPCS_RegMask;
  if (CC == CallingConv::AArch64_SVE_VectorCall)
    return SCS ? CSR_AArch64_SVE_AAPCS_SCS_RegMask
               : CSR_AArch64_SVE_AAPCS_RegMask;
  if (CC == CallingConv::CFGuard_Check)
    return CSR_Win_AArch64_CFGuard_Check_RegMask;
  if (MF.getSubtarget<AArch64Subtarget>().getTargetLowering()
          ->supportSwiftError() &&
      MF.getFunction().getAttributes().hasAttrSomewhere(Attribute::SwiftError))
    return SCS ? CSR_AArch64_AAPCS_SwiftError_SCS_RegMask
               : CSR_AArch64_AAPCS_SwiftError_RegMask;
  if (CC == CallingConv::PreserveMost)
    return SCS ? CSR_AArch64_RT_MostRegs_SCS_RegMask
               : CSR_AArch64_RT_MostRegs_RegMask;
  return SCS ? CSR_AArch64_AAPCS_SCS_RegMask : CSR_AArch64_AAPCS_RegMask;
}

const uint32_t *AArch64RegisterInfo::getCustomEHPadPreservedMask(
    const MachineFunction &MF) const {
  if (MF.getSubtarget<AArch64Subtarget>().isTargetLinux())
    return CSR_AArch64_AAPCS_RegMask;

  return nullptr;
}

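// TLS accessor calls follow a special protocol that clobbers far fewer
// registers than a normal call, so they use dedicated preserved masks.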
const uint32_t *AArch64RegisterInfo::getTLSCallPreservedMask() const {
  if (TT.isOSDarwin())
    return CSR_Darwin_AArch64_TLS_RegMask;

  assert(TT.isOSBinFormatELF() && "Invalid target");
  return CSR_AArch64_TLS_ELF_RegMask;
}

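// The register-mask counterpart of UpdateCustomCalleeSavedRegs: mark any
// custom callee-saved X registers, together with all of their sub-registers,
// as preserved in a writable copy of *Mask.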
void AArch64RegisterInfo::UpdateCustomCallPreservedMask(MachineFunction &MF,
                                                 const uint32_t **Mask) const {
  uint32_t *UpdatedMask = MF.allocateRegMask();
  unsigned RegMaskSize = MachineOperand::getRegMaskSize(getNumRegs());
  memcpy(UpdatedMask, *Mask, sizeof(UpdatedMask[0]) * RegMaskSize);

  for (size_t i = 0; i < AArch64::GPR64commonRegClass.getNumRegs(); ++i) {
    if (MF.getSubtarget<AArch64Subtarget>().isXRegCustomCalleeSaved(i)) {
      for (MCSubRegIterator SubReg(AArch64::GPR64commonRegClass.getRegister(i),
                                   this, true);
           SubReg.isValid(); ++SubReg) {
        // See TargetRegisterInfo::getCallPreservedMask for how to interpret
        // the register mask.
        UpdatedMask[*SubReg / 32] |= 1u << (*SubReg % 32);
      }
    }
  }
  *Mask = UpdatedMask;
}

const uint32_t *AArch64RegisterInfo::getNoPreservedMask() const {
  return CSR_AArch64_NoRegs_RegMask;
}

const uint32_t *
AArch64RegisterInfo::getThisReturnPreservedMask(const MachineFunction &MF,
                                                CallingConv::ID CC) const {
  // This should return a register mask that is the same as that returned by
  // getCallPreservedMask but that additionally preserves the register used for
  // the first i64 argument (which must also be the register used to return a
  // single i64 return value).
  //
  // In case the calling convention does not use the same register for both,
  // the function should return NULL (this does not currently apply).
  assert(CC != CallingConv::GHC && "should not be GHC calling convention.");
  if (MF.getSubtarget<AArch64Subtarget>().isTargetDarwin())
    return CSR_Darwin_AArch64_AAPCS_ThisReturn_RegMask;
  return CSR_AArch64_AAPCS_ThisReturn_RegMask;
}

const uint32_t *AArch64RegisterInfo::getWindowsStackProbePreservedMask() const {
  return CSR_AArch64_StackProbe_Windows_RegMask;
}

BitVector
AArch64RegisterInfo::getReservedRegs(const MachineFunction &MF) const {
  const AArch64FrameLowering *TFI = getFrameLowering(MF);

  // FIXME: avoid re-calculating this every time.
  BitVector Reserved(getNumRegs());
  markSuperRegs(Reserved, AArch64::WSP);
  markSuperRegs(Reserved, AArch64::WZR);

  if (TFI->hasFP(MF) || TT.isOSDarwin())
    markSuperRegs(Reserved, AArch64::W29);

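  // Reserve any X register the subtarget was asked to keep out of register
  // allocation (e.g. the platform register or -ffixed-xN style options).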
  for (size_t i = 0; i < AArch64::GPR32commonRegClass.getNumRegs(); ++i) {
    if (MF.getSubtarget<AArch64Subtarget>().isXRegisterReserved(i))
      markSuperRegs(Reserved, AArch64::GPR32commonRegClass.getRegister(i));
  }

  if (hasBasePointer(MF))
    markSuperRegs(Reserved, AArch64::W19);

  // SLH uses register W16/X16 as the taint register.
  if (MF.getFunction().hasFnAttribute(Attribute::SpeculativeLoadHardening))
    markSuperRegs(Reserved, AArch64::W16);

  assert(checkAllSuperRegsMarked(Reserved));
  return Reserved;
}

bool AArch64RegisterInfo::isReservedReg(const MachineFunction &MF,
                                        MCRegister Reg) const {
  return getReservedRegs(MF)[Reg];
}

bool AArch64RegisterInfo::isAnyArgRegReserved(const MachineFunction &MF) const {
  return llvm::any_of(*AArch64::GPR64argRegClass.MC, [this, &MF](MCPhysReg r) {
    return isReservedReg(MF, r);
  });
}

void AArch64RegisterInfo::emitReservedArgRegCallError(
    const MachineFunction &MF) const {
  const Function &F = MF.getFunction();
  F.getContext().diagnose(DiagnosticInfoUnsupported{
      F, "AArch64 doesn't support function calls if any of the argument "
         "registers is reserved."});
}

bool AArch64RegisterInfo::isAsmClobberable(const MachineFunction &MF,
                                           MCRegister PhysReg) const {
  return !isReservedReg(MF, PhysReg);
}

bool AArch64RegisterInfo::isConstantPhysReg(MCRegister PhysReg) const {
  return PhysReg == AArch64::WZR || PhysReg == AArch64::XZR;
}

const TargetRegisterClass *
AArch64RegisterInfo::getPointerRegClass(const MachineFunction &MF,
                                        unsigned Kind) const {
  return &AArch64::GPR64spRegClass;
}

const TargetRegisterClass *
AArch64RegisterInfo::getCrossCopyRegClass(const TargetRegisterClass *RC) const {
  if (RC == &AArch64::CCRRegClass)
    return &AArch64::GPR64RegClass; // Only MSR & MRS copy NZCV.
  return RC;
}

unsigned AArch64RegisterInfo::getBaseRegister() const { return AArch64::X19; }

bool AArch64RegisterInfo::hasBasePointer(const MachineFunction &MF) const {
  const MachineFrameInfo &MFI = MF.getFrameInfo();

  // In the presence of variable sized objects or funclets, if the fixed stack
  // size is large enough that referencing from the FP won't result in things
  // being in range relatively often, we can use a base pointer to allow access
  // from the other direction like the SP normally works.
  //
  // Furthermore, if variable sized objects are present and the stack needs to
  // be dynamically re-aligned, the base pointer is the only reliable way to
  // reference the locals.
  if (MFI.hasVarSizedObjects() || MF.hasEHFunclets()) {
    if (needsStackRealignment(MF))
      return true;

    if (MF.getSubtarget<AArch64Subtarget>().hasSVE()) {
      const AArch64FunctionInfo *AFI = MF.getInfo<AArch64FunctionInfo>();
      // Frames that have variable sized objects and scalable SVE objects
      // should always use a base pointer.
      if (!AFI->hasCalculatedStackSizeSVE() || AFI->getStackSizeSVE())
        return true;
    }

    // Conservatively estimate whether the negative offset from the frame
    // pointer will be sufficient to reach. If a function has a smallish
    // frame, it's less likely to have lots of spills and callee saved
    // space, so it's all more likely to be within range of the frame pointer.
    // If it's wrong, we'll materialize the constant and still get to the
    // object; it's just suboptimal. Negative offsets use the unscaled
    // load/store instructions, which have a 9-bit signed immediate.
    return MFI.getLocalFrameSize() >= 256;
  }

  return false;
}

Register
AArch64RegisterInfo::getFrameRegister(const MachineFunction &MF) const {
  const AArch64FrameLowering *TFI = getFrameLowering(MF);
  return TFI->hasFP(MF) ? AArch64::FP : AArch64::SP;
}

bool AArch64RegisterInfo::requiresRegisterScavenging(
    const MachineFunction &MF) const {
  return true;
}

bool AArch64RegisterInfo::requiresVirtualBaseRegisters(
    const MachineFunction &MF) const {
  return true;
}

bool
AArch64RegisterInfo::useFPForScavengingIndex(const MachineFunction &MF) const {
  // This function indicates whether the emergency spillslot should be placed
  // close to the beginning of the stackframe (closer to FP) or the end
  // (closer to SP).
  //
  // The beginning works most reliably if we have a frame pointer.
  // In the presence of any non-constant space between FP and locals
  // (e.g. in case of stack realignment or a scalable SVE area), it is
  // better to use SP or BP.
  const AArch64FrameLowering &TFI = *getFrameLowering(MF);
  const AArch64FunctionInfo *AFI = MF.getInfo<AArch64FunctionInfo>();
  assert((!MF.getSubtarget<AArch64Subtarget>().hasSVE() ||
          AFI->hasCalculatedStackSizeSVE()) &&
         "Expected SVE area to be calculated by this point");
  return TFI.hasFP(MF) && !needsStackRealignment(MF) && !AFI->getStackSizeSVE();
}

bool AArch64RegisterInfo::requiresFrameIndexScavenging(
    const MachineFunction &MF) const {
  return true;
}

bool
AArch64RegisterInfo::cannotEliminateFrame(const MachineFunction &MF) const {
  const MachineFrameInfo &MFI = MF.getFrameInfo();
  if (MF.getTarget().Options.DisableFramePointerElim(MF) && MFI.adjustsStack())
    return true;
  return MFI.hasVarSizedObjects() || MFI.isFrameAddressTaken();
}

/// needsFrameBaseReg - Returns true if the instruction's frame index
/// reference would be better served by a base register other than FP
/// or SP. Used by LocalStackFrameAllocation to determine which frame index
/// references it should create new base registers for.
bool AArch64RegisterInfo::needsFrameBaseReg(MachineInstr *MI,
                                            int64_t Offset) const {
  for (unsigned i = 0; !MI->getOperand(i).isFI(); ++i)
    assert(i < MI->getNumOperands() &&
           "Instr doesn't have FrameIndex operand!");

  // It's the load/store FI references that cause issues, as it can be difficult
  // to materialize the offset if it won't fit in the literal field. Estimate
  // based on the size of the local frame and some conservative assumptions
  // about the rest of the stack frame (note, this is pre-regalloc, so
  // we don't know everything for certain yet) whether this offset is likely
  // to be out of range of the immediate. Return true if so.

  // We only generate virtual base registers for loads and stores, so
  // return false for everything else.
  if (!MI->mayLoad() && !MI->mayStore())
    return false;

  // Without a virtual base register, if the function has variable sized
  // objects, all fixed-size local references will be via the frame pointer.
  // Approximate the offset and see if it's legal for the instruction.
  // Note that the incoming offset is based on the SP value at function entry,
  // so it'll be negative.
  MachineFunction &MF = *MI->getParent()->getParent();
  const AArch64FrameLowering *TFI = getFrameLowering(MF);
  MachineFrameInfo &MFI = MF.getFrameInfo();

  // Estimate an offset from the frame pointer.
  // Conservatively assume all GPR callee-saved registers get pushed.
  // FP, LR, X19-X28, D8-D15. 64 bits each.
  int64_t FPOffset = Offset - 16 * 20;
  // Estimate an offset from the stack pointer.
  // The incoming offset relates to the SP at the start of the function,
  // but when we access the local it'll be relative to the SP after local
  // allocation, so adjust our SP-relative offset by that allocation size.
  Offset += MFI.getLocalFrameSize();
  // Assume that we'll have at least some spill slots allocated.
  // FIXME: This is a total SWAG number. We should run some statistics
  //        and pick a real one.
  Offset += 128; // 128 bytes of spill slots

  // If there is a frame pointer, try using it.
  // The FP is only available if there is no dynamic realignment. We
  // don't know for sure yet whether we'll need that, so we guess based
  // on whether there are any local variables that would trigger it.
  if (TFI->hasFP(MF) && isFrameOffsetLegal(MI, AArch64::FP, FPOffset))
    return false;

  // If we can reference via the stack pointer or base pointer, try that.
  // FIXME: This (and the code that resolves the references) can be improved
  //        to only disallow SP relative references in the live range of
  //        the VLA(s). In practice, it's unclear how much difference that
  //        would make, but it may be worth doing.
  if (isFrameOffsetLegal(MI, AArch64::SP, Offset))
    return false;

  // If even offset 0 is illegal, we don't want a virtual base register.
  if (!isFrameOffsetLegal(MI, AArch64::SP, 0))
    return false;

  // The offset likely isn't legal; we want to allocate a virtual base register.
  return true;
}

bool AArch64RegisterInfo::isFrameOffsetLegal(const MachineInstr *MI,
                                             Register BaseReg,
                                             int64_t Offset) const {
  assert(MI && "Unable to get the legal offset for nil instruction.");
  StackOffset SaveOffset = StackOffset::getFixed(Offset);
  return isAArch64FrameOffsetLegal(*MI, SaveOffset) & AArch64FrameOffsetIsLegal;
}

/// Insert defining instruction(s) for BaseReg to be a pointer to FrameIdx
/// at the beginning of the basic block.
Register
AArch64RegisterInfo::materializeFrameBaseRegister(MachineBasicBlock *MBB,
                                                  int FrameIdx,
                                                  int64_t Offset) const {
  MachineBasicBlock::iterator Ins = MBB->begin();
  DebugLoc DL; // Defaults to "unknown".
  if (Ins != MBB->end())
    DL = Ins->getDebugLoc();
  const MachineFunction &MF = *MBB->getParent();
  const AArch64InstrInfo *TII =
      MF.getSubtarget<AArch64Subtarget>().getInstrInfo();
  const MCInstrDesc &MCID = TII->get(AArch64::ADDXri);
  MachineRegisterInfo &MRI = MBB->getParent()->getRegInfo();
  Register BaseReg = MRI.createVirtualRegister(&AArch64::GPR64spRegClass);
  MRI.constrainRegClass(BaseReg, TII->getRegClass(MCID, 0, this, MF));
  unsigned Shifter = AArch64_AM::getShifterImm(AArch64_AM::LSL, 0);

  BuildMI(*MBB, Ins, DL, MCID, BaseReg)
      .addFrameIndex(FrameIdx)
      .addImm(Offset)
      .addImm(Shifter);

  return BaseReg;
}

void AArch64RegisterInfo::resolveFrameIndex(MachineInstr &MI, Register BaseReg,
                                            int64_t Offset) const {
  // AArch64 doesn't need the general 64-bit offsets.
  StackOffset Off = StackOffset::getFixed(Offset);

  unsigned i = 0;
  while (!MI.getOperand(i).isFI()) {
    ++i;
    assert(i < MI.getNumOperands() && "Instr doesn't have FrameIndex operand!");
  }

  const MachineFunction *MF = MI.getParent()->getParent();
  const AArch64InstrInfo *TII =
      MF->getSubtarget<AArch64Subtarget>().getInstrInfo();
  bool Done = rewriteAArch64FrameIndex(MI, i, BaseReg, Off, TII);
  assert(Done && "Unable to resolve frame index!");
  (void)Done;
}

// Create a scratch register for the frame index elimination in an instruction.
// This function has special handling of stack tagging loop pseudos, in which
// case it can also change the instruction opcode (but not the operands).
static Register
createScratchRegisterForInstruction(MachineInstr &MI,
                                    const AArch64InstrInfo *TII) {
  // ST*Gloop pseudos have a reserved scratch register in operand 1. Use it,
  // and also replace the instruction with the writeback variant because it
  // will now satisfy the operand constraints for it.
  if (MI.getOpcode() == AArch64::STGloop) {
    MI.setDesc(TII->get(AArch64::STGloop_wback));
    return MI.getOperand(1).getReg();
  } else if (MI.getOpcode() == AArch64::STZGloop) {
    MI.setDesc(TII->get(AArch64::STZGloop_wback));
    return MI.getOperand(1).getReg();
  } else {
    return MI.getMF()->getRegInfo().createVirtualRegister(
        &AArch64::GPR64RegClass);
  }
}

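// Append DWARF expression operations to \p Ops that add \p Offset, including
// any scalable (SVE) component, to the value computed so far.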
void AArch64RegisterInfo::getOffsetOpcodes(
    const StackOffset &Offset, SmallVectorImpl<uint64_t> &Ops) const {
  // The smallest scalable elements supported by scaled SVE addressing modes
  // are predicates, which are 2 scalable bytes in size. So the scalable byte
  // offset must always be a multiple of 2.
  assert(Offset.getScalable() % 2 == 0 && "Invalid frame offset");

  // Add fixed-sized offset using existing DIExpression interface.
  DIExpression::appendOffset(Ops, Offset.getFixed());

  unsigned VG = getDwarfRegNum(AArch64::VG, true);
  int64_t VGSized = Offset.getScalable() / 2;
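  // VG is the vector granule (the vector length in 64-bit units), which is
  // twice the unit the scalable offset is measured in, so the run-time byte
  // offset equals VGSized * VG. The sequences below compute exactly that:
  //   constu VGSized; bregx VG, 0; mul; plus (or minus).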
  if (VGSized > 0) {
    Ops.push_back(dwarf::DW_OP_constu);
    Ops.push_back(VGSized);
    Ops.append({dwarf::DW_OP_bregx, VG, 0ULL});
    Ops.push_back(dwarf::DW_OP_mul);
    Ops.push_back(dwarf::DW_OP_plus);
  } else if (VGSized < 0) {
    Ops.push_back(dwarf::DW_OP_constu);
    Ops.push_back(-VGSized);
    Ops.append({dwarf::DW_OP_bregx, VG, 0ULL});
    Ops.push_back(dwarf::DW_OP_mul);
    Ops.push_back(dwarf::DW_OP_minus);
  }
}

void AArch64RegisterInfo::eliminateFrameIndex(MachineBasicBlock::iterator II,
                                              int SPAdj, unsigned FIOperandNum,
                                              RegScavenger *RS) const {
  assert(SPAdj == 0 && "Unexpected");

  MachineInstr &MI = *II;
  MachineBasicBlock &MBB = *MI.getParent();
  MachineFunction &MF = *MBB.getParent();
  const MachineFrameInfo &MFI = MF.getFrameInfo();
  const AArch64InstrInfo *TII =
      MF.getSubtarget<AArch64Subtarget>().getInstrInfo();
  const AArch64FrameLowering *TFI = getFrameLowering(MF);
  int FrameIndex = MI.getOperand(FIOperandNum).getIndex();
  bool Tagged =
      MI.getOperand(FIOperandNum).getTargetFlags() & AArch64II::MO_TAGGED;
  Register FrameReg;

  // Special handling of dbg_value, stackmap, patchpoint, and statepoint
  // instructions.
  if (MI.getOpcode() == TargetOpcode::STACKMAP ||
      MI.getOpcode() == TargetOpcode::PATCHPOINT ||
      MI.getOpcode() == TargetOpcode::STATEPOINT) {
    StackOffset Offset =
        TFI->resolveFrameIndexReference(MF, FrameIndex, FrameReg,
                                        /*PreferFP=*/true,
                                        /*ForSimm=*/false);
    Offset += StackOffset::getFixed(MI.getOperand(FIOperandNum + 1).getImm());
    MI.getOperand(FIOperandNum).ChangeToRegister(FrameReg, false /*isDef*/);
    MI.getOperand(FIOperandNum + 1).ChangeToImmediate(Offset.getFixed());
    return;
  }

  if (MI.getOpcode() == TargetOpcode::LOCAL_ESCAPE) {
    MachineOperand &FI = MI.getOperand(FIOperandNum);
    StackOffset Offset = TFI->getNonLocalFrameIndexReference(MF, FrameIndex);
    assert(!Offset.getScalable() &&
           "Frame offsets with a scalable component are not supported");
    FI.ChangeToImmediate(Offset.getFixed());
    return;
  }

  StackOffset Offset;
  if (MI.getOpcode() == AArch64::TAGPstack) {
    // TAGPstack must use the virtual frame register in its 3rd operand.
    const AArch64FunctionInfo *AFI = MF.getInfo<AArch64FunctionInfo>();
    FrameReg = MI.getOperand(3).getReg();
    Offset = StackOffset::getFixed(MFI.getObjectOffset(FrameIndex) +
                                   AFI->getTaggedBasePointerOffset());
  } else if (Tagged) {
    StackOffset SPOffset = StackOffset::getFixed(
        MFI.getObjectOffset(FrameIndex) + (int64_t)MFI.getStackSize());
    if (MFI.hasVarSizedObjects() ||
        isAArch64FrameOffsetLegal(MI, SPOffset, nullptr, nullptr, nullptr) !=
            (AArch64FrameOffsetCanUpdate | AArch64FrameOffsetIsLegal)) {
      // Can't update to SP + offset in place. Precalculate the tagged pointer
      // in a scratch register.
      Offset = TFI->resolveFrameIndexReference(
          MF, FrameIndex, FrameReg, /*PreferFP=*/false, /*ForSimm=*/true);
      Register ScratchReg =
          MF.getRegInfo().createVirtualRegister(&AArch64::GPR64RegClass);
      emitFrameOffset(MBB, II, MI.getDebugLoc(), ScratchReg, FrameReg, Offset,
                      TII);
      BuildMI(MBB, MI, MI.getDebugLoc(), TII->get(AArch64::LDG), ScratchReg)
          .addReg(ScratchReg)
          .addReg(ScratchReg)
          .addImm(0);
      MI.getOperand(FIOperandNum)
          .ChangeToRegister(ScratchReg, false, false, true);
      return;
    }
    FrameReg = AArch64::SP;
    Offset = StackOffset::getFixed(MFI.getObjectOffset(FrameIndex) +
                                   (int64_t)MFI.getStackSize());
  } else {
    Offset = TFI->resolveFrameIndexReference(
        MF, FrameIndex, FrameReg, /*PreferFP=*/false, /*ForSimm=*/true);
  }

  // Modify MI as necessary to handle as much of 'Offset' as possible.
  if (rewriteAArch64FrameIndex(MI, FIOperandNum, FrameReg, Offset, TII))
    return;

  assert((!RS || !RS->isScavengingFrameIndex(FrameIndex)) &&
         "Emergency spill slot is out of reach");

  // If we get here, the immediate doesn't fit into the instruction.  We folded
  // as much as possible above.  Handle the rest, providing a register that is
  // SP+LargeImm.
  Register ScratchReg = createScratchRegisterForInstruction(MI, TII);
  emitFrameOffset(MBB, II, MI.getDebugLoc(), ScratchReg, FrameReg, Offset, TII);
  MI.getOperand(FIOperandNum).ChangeToRegister(ScratchReg, false, false, true);
}

unsigned AArch64RegisterInfo::getRegPressureLimit(const TargetRegisterClass *RC,
                                                  MachineFunction &MF) const {
  const AArch64FrameLowering *TFI = getFrameLowering(MF);

  switch (RC->getID()) {
  default:
    return 0;
  case AArch64::GPR32RegClassID:
  case AArch64::GPR32spRegClassID:
  case AArch64::GPR32allRegClassID:
  case AArch64::GPR64spRegClassID:
  case AArch64::GPR64allRegClassID:
  case AArch64::GPR64RegClassID:
  case AArch64::GPR32commonRegClassID:
  case AArch64::GPR64commonRegClassID:
    return 32 - 1                                   // XZR/SP
              - (TFI->hasFP(MF) || TT.isOSDarwin()) // FP
              - MF.getSubtarget<AArch64Subtarget>().getNumXRegisterReserved()
              - hasBasePointer(MF); // X19
  case AArch64::FPR8RegClassID:
  case AArch64::FPR16RegClassID:
  case AArch64::FPR32RegClassID:
  case AArch64::FPR64RegClassID:
  case AArch64::FPR128RegClassID:
    return 32;

  case AArch64::DDRegClassID:
  case AArch64::DDDRegClassID:
  case AArch64::DDDDRegClassID:
  case AArch64::QQRegClassID:
  case AArch64::QQQRegClassID:
  case AArch64::QQQQRegClassID:
    return 32;

  case AArch64::FPR128_loRegClassID:
  case AArch64::FPR64_loRegClassID:
  case AArch64::FPR16_loRegClassID:
    return 16;
  }
}

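// Choose the register used to address local variables: SP when the frame has
// no funclets or variable sized objects, the base pointer when the stack must
// be realigned, and otherwise the frame register (FP or SP).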
unsigned AArch64RegisterInfo::getLocalAddressRegister(
  const MachineFunction &MF) const {
  const auto &MFI = MF.getFrameInfo();
  if (!MF.hasEHFunclets() && !MFI.hasVarSizedObjects())
    return AArch64::SP;
  if (needsStackRealignment(MF))
    return getBaseRegister();
  return getFrameRegister(MF);
}

/// SrcRC and DstRC will be morphed into NewRC if this returns true.
bool AArch64RegisterInfo::shouldCoalesce(
    MachineInstr *MI, const TargetRegisterClass *SrcRC, unsigned SubReg,
    const TargetRegisterClass *DstRC, unsigned DstSubReg,
    const TargetRegisterClass *NewRC, LiveIntervals &LIS) const {
  if (MI->isCopy() &&
      ((DstRC->getID() == AArch64::GPR64RegClassID) ||
       (DstRC->getID() == AArch64::GPR64commonRegClassID)) &&
      MI->getOperand(0).getSubReg() && MI->getOperand(1).getSubReg())
    // Do not coalesce in the case of a 32-bit subregister copy
    // which implements a 32 to 64 bit zero extension
    // which relies on the upper 32 bits being zeroed.
    return false;
  return true;
}