//===- AArch64RegisterInfo.cpp - AArch64 Register Information ------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file contains the AArch64 implementation of the TargetRegisterInfo
// class.
//
//===----------------------------------------------------------------------===//

#include "AArch64RegisterInfo.h"
#include "AArch64FrameLowering.h"
#include "AArch64InstrInfo.h"
#include "AArch64MachineFunctionInfo.h"
#include "AArch64Subtarget.h"
#include "MCTargetDesc/AArch64AddressingModes.h"
#include "MCTargetDesc/AArch64InstPrinter.h"
#include "llvm/ADT/BitVector.h"
#include "llvm/BinaryFormat/Dwarf.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/RegisterScavenging.h"
#include "llvm/CodeGen/TargetFrameLowering.h"
#include "llvm/IR/DebugInfoMetadata.h"
#include "llvm/IR/DiagnosticInfo.h"
#include "llvm/IR/Function.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetOptions.h"
#include "llvm/TargetParser/Triple.h"

using namespace llvm;

#define GET_CC_REGISTER_LISTS
#include "AArch64GenCallingConv.inc"
#define GET_REGINFO_TARGET_DESC
#include "AArch64GenRegisterInfo.inc"

AArch64RegisterInfo::AArch64RegisterInfo(const Triple &TT)
    : AArch64GenRegisterInfo(AArch64::LR), TT(TT) {
  AArch64_MC::initLLVMToCVRegMapping(this);
}

/// Return whether the register needs a CFI entry. Not all unwinders may know
/// about SVE registers, so we assume the lowest common denominator, i.e. the
/// callee-saves required by the base ABI. For the SVE registers z8-z15 only
/// the lower 64 bits (d8-d15) need to be saved. The lower 64-bit subreg is
/// returned in \p RegToUseForCFI.
bool AArch64RegisterInfo::regNeedsCFI(unsigned Reg,
                                      unsigned &RegToUseForCFI) const {
  if (AArch64::PPRRegClass.contains(Reg))
    return false;

  if (AArch64::ZPRRegClass.contains(Reg)) {
    RegToUseForCFI = getSubReg(Reg, AArch64::dsub);
    for (int I = 0; CSR_AArch64_AAPCS_SaveList[I]; ++I) {
      if (CSR_AArch64_AAPCS_SaveList[I] == RegToUseForCFI)
        return true;
    }
    return false;
  }

  RegToUseForCFI = Reg;
  return true;
}
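
// Illustrative note (derived from the base-ABI callee-save list, not spelled
// out by the code above): for Z8 the dsub subregister is D8, which is in
// CSR_AArch64_AAPCS_SaveList, so regNeedsCFI(Z8, R) returns true with R = D8.
// For Z16, dsub yields D16, which is not a base-ABI callee-save, and for any
// predicate register (e.g. P4) the function returns false immediately, so
// neither gets a CFI entry.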

const MCPhysReg *
AArch64RegisterInfo::getCalleeSavedRegs(const MachineFunction *MF) const {
  assert(MF && "Invalid MachineFunction pointer.");

  if (MF->getFunction().getCallingConv() == CallingConv::GHC)
    // GHC set of callee saved regs is empty as all those regs are
    // used for passing STG regs around.
    return CSR_AArch64_NoRegs_SaveList;
  if (MF->getFunction().getCallingConv() == CallingConv::AnyReg)
    return CSR_AArch64_AllRegs_SaveList;

  // Darwin has its own CSR_AArch64_AAPCS_SaveList, which means most CSR save
  // lists depending on that will need to have their Darwin variant as well.
  if (MF->getSubtarget<AArch64Subtarget>().isTargetDarwin())
    return getDarwinCalleeSavedRegs(MF);

  if (MF->getFunction().getCallingConv() == CallingConv::CFGuard_Check)
    return CSR_Win_AArch64_CFGuard_Check_SaveList;
  if (MF->getSubtarget<AArch64Subtarget>().isTargetWindows()) {
    if (MF->getSubtarget<AArch64Subtarget>()
            .getTargetLowering()
            ->supportSwiftError() &&
        MF->getFunction().getAttributes().hasAttrSomewhere(
            Attribute::SwiftError))
      return CSR_Win_AArch64_AAPCS_SwiftError_SaveList;
    if (MF->getFunction().getCallingConv() == CallingConv::SwiftTail)
      return CSR_Win_AArch64_AAPCS_SwiftTail_SaveList;
    return CSR_Win_AArch64_AAPCS_SaveList;
  }
  if (MF->getFunction().getCallingConv() == CallingConv::AArch64_VectorCall)
    return CSR_AArch64_AAVPCS_SaveList;
  if (MF->getFunction().getCallingConv() ==
      CallingConv::AArch64_SVE_VectorCall)
    return CSR_AArch64_SVE_AAPCS_SaveList;
  if (MF->getFunction().getCallingConv() ==
      CallingConv::AArch64_SME_ABI_Support_Routines_PreserveMost_From_X0)
    report_fatal_error(
        "Calling convention "
        "AArch64_SME_ABI_Support_Routines_PreserveMost_From_X0 is only "
        "supported to improve calls to SME ACLE save/restore/disable-za "
        "functions, and is not intended to be used beyond that scope.");
  if (MF->getFunction().getCallingConv() ==
      CallingConv::AArch64_SME_ABI_Support_Routines_PreserveMost_From_X2)
    report_fatal_error(
        "Calling convention "
        "AArch64_SME_ABI_Support_Routines_PreserveMost_From_X2 is only "
        "supported to improve calls to SME ACLE __arm_sme_state "
        "and is not intended to be used beyond that scope.");
  if (MF->getSubtarget<AArch64Subtarget>()
          .getTargetLowering()
          ->supportSwiftError() &&
      MF->getFunction().getAttributes().hasAttrSomewhere(
          Attribute::SwiftError))
    return CSR_AArch64_AAPCS_SwiftError_SaveList;
  if (MF->getFunction().getCallingConv() == CallingConv::SwiftTail)
    return CSR_AArch64_AAPCS_SwiftTail_SaveList;
  if (MF->getFunction().getCallingConv() == CallingConv::PreserveMost)
    return CSR_AArch64_RT_MostRegs_SaveList;
  if (MF->getFunction().getCallingConv() == CallingConv::PreserveAll)
    return CSR_AArch64_RT_AllRegs_SaveList;
  if (MF->getFunction().getCallingConv() == CallingConv::Win64)
    // This is for OSes other than Windows; Windows is a separate case further
    // above.
    return CSR_AArch64_AAPCS_X18_SaveList;
  if (MF->getInfo<AArch64FunctionInfo>()->isSVECC())
    return CSR_AArch64_SVE_AAPCS_SaveList;
  return CSR_AArch64_AAPCS_SaveList;
}
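
// Illustrative usage sketch (generic LLVM convention, not specific to this
// file): the returned save list is a null-terminated array of MCPhysReg, so
// callers typically walk it like
//
//   for (const MCPhysReg *I = TRI->getCalleeSavedRegs(&MF); *I; ++I)
//     handleCalleeSave(*I); // handleCalleeSave is a hypothetical helper
//
// which is exactly how UpdateCustomCalleeSavedRegs below consumes it.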

const MCPhysReg *
AArch64RegisterInfo::getDarwinCalleeSavedRegs(const MachineFunction *MF) const {
  assert(MF && "Invalid MachineFunction pointer.");
  assert(MF->getSubtarget<AArch64Subtarget>().isTargetDarwin() &&
         "Invalid subtarget for getDarwinCalleeSavedRegs");

  if (MF->getFunction().getCallingConv() == CallingConv::CFGuard_Check)
    report_fatal_error(
        "Calling convention CFGuard_Check is unsupported on Darwin.");
  if (MF->getFunction().getCallingConv() == CallingConv::AArch64_VectorCall)
    return CSR_Darwin_AArch64_AAVPCS_SaveList;
  if (MF->getFunction().getCallingConv() ==
      CallingConv::AArch64_SVE_VectorCall)
    report_fatal_error(
        "Calling convention SVE_VectorCall is unsupported on Darwin.");
  if (MF->getFunction().getCallingConv() ==
      CallingConv::AArch64_SME_ABI_Support_Routines_PreserveMost_From_X0)
    report_fatal_error(
        "Calling convention "
        "AArch64_SME_ABI_Support_Routines_PreserveMost_From_X0 is only "
        "supported to improve calls to SME ACLE save/restore/disable-za "
        "functions, and is not intended to be used beyond that scope.");
  if (MF->getFunction().getCallingConv() ==
      CallingConv::AArch64_SME_ABI_Support_Routines_PreserveMost_From_X2)
    report_fatal_error(
        "Calling convention "
        "AArch64_SME_ABI_Support_Routines_PreserveMost_From_X2 is only "
        "supported to improve calls to SME ACLE __arm_sme_state "
        "and is not intended to be used beyond that scope.");
  if (MF->getFunction().getCallingConv() == CallingConv::CXX_FAST_TLS)
    return MF->getInfo<AArch64FunctionInfo>()->isSplitCSR()
               ? CSR_Darwin_AArch64_CXX_TLS_PE_SaveList
               : CSR_Darwin_AArch64_CXX_TLS_SaveList;
  if (MF->getSubtarget<AArch64Subtarget>()
          .getTargetLowering()
          ->supportSwiftError() &&
      MF->getFunction().getAttributes().hasAttrSomewhere(
          Attribute::SwiftError))
    return CSR_Darwin_AArch64_AAPCS_SwiftError_SaveList;
  if (MF->getFunction().getCallingConv() == CallingConv::SwiftTail)
    return CSR_Darwin_AArch64_AAPCS_SwiftTail_SaveList;
  if (MF->getFunction().getCallingConv() == CallingConv::PreserveMost)
    return CSR_Darwin_AArch64_RT_MostRegs_SaveList;
  if (MF->getFunction().getCallingConv() == CallingConv::PreserveAll)
    return CSR_Darwin_AArch64_RT_AllRegs_SaveList;
  if (MF->getFunction().getCallingConv() == CallingConv::Win64)
    return CSR_Darwin_AArch64_AAPCS_Win64_SaveList;
  return CSR_Darwin_AArch64_AAPCS_SaveList;
}

const MCPhysReg *AArch64RegisterInfo::getCalleeSavedRegsViaCopy(
    const MachineFunction *MF) const {
  assert(MF && "Invalid MachineFunction pointer.");
  if (MF->getFunction().getCallingConv() == CallingConv::CXX_FAST_TLS &&
      MF->getInfo<AArch64FunctionInfo>()->isSplitCSR())
    return CSR_Darwin_AArch64_CXX_TLS_ViaCopy_SaveList;
  return nullptr;
}

void AArch64RegisterInfo::UpdateCustomCalleeSavedRegs(
    MachineFunction &MF) const {
  const MCPhysReg *CSRs = getCalleeSavedRegs(&MF);
  SmallVector<MCPhysReg, 32> UpdatedCSRs;
  for (const MCPhysReg *I = CSRs; *I; ++I)
    UpdatedCSRs.push_back(*I);

  for (size_t i = 0; i < AArch64::GPR64commonRegClass.getNumRegs(); ++i) {
    if (MF.getSubtarget<AArch64Subtarget>().isXRegCustomCalleeSaved(i)) {
      UpdatedCSRs.push_back(AArch64::GPR64commonRegClass.getRegister(i));
    }
  }
  // Register lists are zero-terminated.
  UpdatedCSRs.push_back(0);
  MF.getRegInfo().setCalleeSavedRegs(UpdatedCSRs);
}
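
// Illustrative note (the specific driver flag is an assumption here, taken
// from clang's -fcall-saved-xN family): compiling with -fcall-saved-x18
// makes isXRegCustomCalleeSaved(18) return true, so the loop above appends
// X18 to the convention's save list before re-terminating it with 0.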

const TargetRegisterClass *
AArch64RegisterInfo::getSubClassWithSubReg(const TargetRegisterClass *RC,
                                           unsigned Idx) const {
  // Edge case for the GPR/FPR register classes.
  if (RC == &AArch64::GPR32allRegClass && Idx == AArch64::hsub)
    return &AArch64::FPR32RegClass;
  else if (RC == &AArch64::GPR64allRegClass && Idx == AArch64::hsub)
    return &AArch64::FPR64RegClass;

  // Forward to TableGen's default version.
  return AArch64GenRegisterInfo::getSubClassWithSubReg(RC, Idx);
}

const uint32_t *
AArch64RegisterInfo::getDarwinCallPreservedMask(const MachineFunction &MF,
                                                CallingConv::ID CC) const {
  assert(MF.getSubtarget<AArch64Subtarget>().isTargetDarwin() &&
         "Invalid subtarget for getDarwinCallPreservedMask");

  if (CC == CallingConv::CXX_FAST_TLS)
    return CSR_Darwin_AArch64_CXX_TLS_RegMask;
  if (CC == CallingConv::AArch64_VectorCall)
    return CSR_Darwin_AArch64_AAVPCS_RegMask;
  if (CC == CallingConv::AArch64_SVE_VectorCall)
    report_fatal_error(
        "Calling convention SVE_VectorCall is unsupported on Darwin.");
  if (CC == CallingConv::AArch64_SME_ABI_Support_Routines_PreserveMost_From_X0)
    report_fatal_error(
        "Calling convention "
        "AArch64_SME_ABI_Support_Routines_PreserveMost_From_X0 is "
        "unsupported on Darwin.");
  if (CC == CallingConv::AArch64_SME_ABI_Support_Routines_PreserveMost_From_X2)
    report_fatal_error(
        "Calling convention "
        "AArch64_SME_ABI_Support_Routines_PreserveMost_From_X2 is "
        "unsupported on Darwin.");
  if (CC == CallingConv::CFGuard_Check)
    report_fatal_error(
        "Calling convention CFGuard_Check is unsupported on Darwin.");
  if (MF.getSubtarget<AArch64Subtarget>()
          .getTargetLowering()
          ->supportSwiftError() &&
      MF.getFunction().getAttributes().hasAttrSomewhere(Attribute::SwiftError))
    return CSR_Darwin_AArch64_AAPCS_SwiftError_RegMask;
  if (CC == CallingConv::SwiftTail)
    return CSR_Darwin_AArch64_AAPCS_SwiftTail_RegMask;
  if (CC == CallingConv::PreserveMost)
    return CSR_Darwin_AArch64_RT_MostRegs_RegMask;
  if (CC == CallingConv::PreserveAll)
    return CSR_Darwin_AArch64_RT_AllRegs_RegMask;
  return CSR_Darwin_AArch64_AAPCS_RegMask;
}

const uint32_t *
AArch64RegisterInfo::getCallPreservedMask(const MachineFunction &MF,
                                          CallingConv::ID CC) const {
  bool SCS = MF.getFunction().hasFnAttribute(Attribute::ShadowCallStack);
  if (CC == CallingConv::GHC)
    // This is academic because all GHC calls are (supposed to be) tail calls.
    return SCS ? CSR_AArch64_NoRegs_SCS_RegMask : CSR_AArch64_NoRegs_RegMask;
  if (CC == CallingConv::AnyReg)
    return SCS ? CSR_AArch64_AllRegs_SCS_RegMask : CSR_AArch64_AllRegs_RegMask;

  // All the following calling conventions are handled differently on Darwin.
  if (MF.getSubtarget<AArch64Subtarget>().isTargetDarwin()) {
    if (SCS)
      report_fatal_error("ShadowCallStack attribute not supported on Darwin.");
    return getDarwinCallPreservedMask(MF, CC);
  }

  if (CC == CallingConv::AArch64_VectorCall)
    return SCS ? CSR_AArch64_AAVPCS_SCS_RegMask : CSR_AArch64_AAVPCS_RegMask;
  if (CC == CallingConv::AArch64_SVE_VectorCall)
    return SCS ? CSR_AArch64_SVE_AAPCS_SCS_RegMask
               : CSR_AArch64_SVE_AAPCS_RegMask;
  if (CC == CallingConv::AArch64_SME_ABI_Support_Routines_PreserveMost_From_X0)
    return CSR_AArch64_SME_ABI_Support_Routines_PreserveMost_From_X0_RegMask;
  if (CC == CallingConv::AArch64_SME_ABI_Support_Routines_PreserveMost_From_X2)
    return CSR_AArch64_SME_ABI_Support_Routines_PreserveMost_From_X2_RegMask;
  if (CC == CallingConv::CFGuard_Check)
    return CSR_Win_AArch64_CFGuard_Check_RegMask;
  if (MF.getSubtarget<AArch64Subtarget>()
          .getTargetLowering()
          ->supportSwiftError() &&
      MF.getFunction().getAttributes().hasAttrSomewhere(Attribute::SwiftError))
    return SCS ? CSR_AArch64_AAPCS_SwiftError_SCS_RegMask
               : CSR_AArch64_AAPCS_SwiftError_RegMask;
  if (CC == CallingConv::SwiftTail) {
    if (SCS)
      report_fatal_error(
          "ShadowCallStack attribute not supported with swifttail");
    return CSR_AArch64_AAPCS_SwiftTail_RegMask;
  }
  if (CC == CallingConv::PreserveMost)
    return SCS ? CSR_AArch64_RT_MostRegs_SCS_RegMask
               : CSR_AArch64_RT_MostRegs_RegMask;
  if (CC == CallingConv::PreserveAll)
    return SCS ? CSR_AArch64_RT_AllRegs_SCS_RegMask
               : CSR_AArch64_RT_AllRegs_RegMask;
  return SCS ? CSR_AArch64_AAPCS_SCS_RegMask : CSR_AArch64_AAPCS_RegMask;
}

const uint32_t *AArch64RegisterInfo::getCustomEHPadPreservedMask(
    const MachineFunction &MF) const {
  if (MF.getSubtarget<AArch64Subtarget>().isTargetLinux())
    return CSR_AArch64_AAPCS_RegMask;

  return nullptr;
}

const uint32_t *AArch64RegisterInfo::getTLSCallPreservedMask() const {
  if (TT.isOSDarwin())
    return CSR_Darwin_AArch64_TLS_RegMask;

  assert(TT.isOSBinFormatELF() && "Invalid target");
  return CSR_AArch64_TLS_ELF_RegMask;
}

void AArch64RegisterInfo::UpdateCustomCallPreservedMask(
    MachineFunction &MF, const uint32_t **Mask) const {
  uint32_t *UpdatedMask = MF.allocateRegMask();
  unsigned RegMaskSize = MachineOperand::getRegMaskSize(getNumRegs());
  memcpy(UpdatedMask, *Mask, sizeof(UpdatedMask[0]) * RegMaskSize);

  for (size_t i = 0; i < AArch64::GPR64commonRegClass.getNumRegs(); ++i) {
    if (MF.getSubtarget<AArch64Subtarget>().isXRegCustomCalleeSaved(i)) {
      for (MCPhysReg SubReg :
           subregs_inclusive(AArch64::GPR64commonRegClass.getRegister(i))) {
        // See TargetRegisterInfo::getCallPreservedMask for how to interpret
        // the register mask.
        UpdatedMask[SubReg / 32] |= 1u << (SubReg % 32);
      }
    }
  }
  *Mask = UpdatedMask;
}
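
// Illustrative note on the mask layout (the register number is invented for
// the example): the preserved-register mask is a bit vector packed into
// uint32_t words, one bit per physical register number, where a set bit
// means "preserved across the call". For a hypothetical register with
// encoding 37, the update above touches word 37 / 32 == 1 and sets bit
// 37 % 32 == 5, i.e. UpdatedMask[1] |= 1u << 5.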

const uint32_t *AArch64RegisterInfo::getSMStartStopCallPreservedMask() const {
  return CSR_AArch64_SMStartStop_RegMask;
}

const uint32_t *
AArch64RegisterInfo::SMEABISupportRoutinesCallPreservedMaskFromX0() const {
  return CSR_AArch64_SME_ABI_Support_Routines_PreserveMost_From_X0_RegMask;
}

const uint32_t *AArch64RegisterInfo::getNoPreservedMask() const {
  return CSR_AArch64_NoRegs_RegMask;
}

const uint32_t *
AArch64RegisterInfo::getThisReturnPreservedMask(const MachineFunction &MF,
                                                CallingConv::ID CC) const {
  // This should return a register mask that is the same as that returned by
  // getCallPreservedMask but that additionally preserves the register used
  // for the first i64 argument (which must also be the register used to
  // return a single i64 return value).
  //
  // In case that the calling convention does not use the same register for
  // both, the function should return NULL (does not currently apply).
  assert(CC != CallingConv::GHC && "should not be GHC calling convention.");
  if (MF.getSubtarget<AArch64Subtarget>().isTargetDarwin())
    return CSR_Darwin_AArch64_AAPCS_ThisReturn_RegMask;
  return CSR_AArch64_AAPCS_ThisReturn_RegMask;
}

const uint32_t *AArch64RegisterInfo::getWindowsStackProbePreservedMask() const {
  return CSR_AArch64_StackProbe_Windows_RegMask;
}

std::optional<std::string>
AArch64RegisterInfo::explainReservedReg(const MachineFunction &MF,
                                        MCRegister PhysReg) const {
  if (hasBasePointer(MF) && MCRegisterInfo::regsOverlap(PhysReg, AArch64::X19))
    return std::string("X19 is used as the frame base pointer register.");

  if (MF.getSubtarget<AArch64Subtarget>().isWindowsArm64EC()) {
    bool warn = false;
    if (MCRegisterInfo::regsOverlap(PhysReg, AArch64::X13) ||
        MCRegisterInfo::regsOverlap(PhysReg, AArch64::X14) ||
        MCRegisterInfo::regsOverlap(PhysReg, AArch64::X23) ||
        MCRegisterInfo::regsOverlap(PhysReg, AArch64::X24) ||
        MCRegisterInfo::regsOverlap(PhysReg, AArch64::X28))
      warn = true;

    for (unsigned i = AArch64::B16; i <= AArch64::B31; ++i)
      if (MCRegisterInfo::regsOverlap(PhysReg, i))
        warn = true;

    if (warn)
      return std::string(AArch64InstPrinter::getRegisterName(PhysReg)) +
             " is clobbered by asynchronous signals when using Arm64EC.";
  }

  return {};
}

BitVector
AArch64RegisterInfo::getStrictlyReservedRegs(const MachineFunction &MF) const {
  const AArch64FrameLowering *TFI = getFrameLowering(MF);

  // FIXME: avoid re-calculating this every time.
  BitVector Reserved(getNumRegs());
  markSuperRegs(Reserved, AArch64::WSP);
  markSuperRegs(Reserved, AArch64::WZR);

  if (TFI->hasFP(MF) || TT.isOSDarwin())
    markSuperRegs(Reserved, AArch64::W29);

  if (MF.getSubtarget<AArch64Subtarget>().isWindowsArm64EC()) {
    // x13, x14, x23, x24, x28, and v16-v31 are clobbered by asynchronous
    // signals, so we can't ever use them.
    markSuperRegs(Reserved, AArch64::W13);
    markSuperRegs(Reserved, AArch64::W14);
    markSuperRegs(Reserved, AArch64::W23);
    markSuperRegs(Reserved, AArch64::W24);
    markSuperRegs(Reserved, AArch64::W28);
    for (unsigned i = AArch64::B16; i <= AArch64::B31; ++i)
      markSuperRegs(Reserved, i);
  }

  for (size_t i = 0; i < AArch64::GPR32commonRegClass.getNumRegs(); ++i) {
    if (MF.getSubtarget<AArch64Subtarget>().isXRegisterReserved(i))
      markSuperRegs(Reserved, AArch64::GPR32commonRegClass.getRegister(i));
  }

  if (hasBasePointer(MF))
    markSuperRegs(Reserved, AArch64::W19);

  // SLH uses register W16/X16 as the taint register.
  if (MF.getFunction().hasFnAttribute(Attribute::SpeculativeLoadHardening))
    markSuperRegs(Reserved, AArch64::W16);

  // SME tiles are not allocatable.
  if (MF.getSubtarget<AArch64Subtarget>().hasSME()) {
    for (MCPhysReg SubReg : subregs_inclusive(AArch64::ZA))
      Reserved.set(SubReg);
  }

  markSuperRegs(Reserved, AArch64::FPCR);

  assert(checkAllSuperRegsMarked(Reserved));
  return Reserved;
}

BitVector
AArch64RegisterInfo::getReservedRegs(const MachineFunction &MF) const {
  BitVector Reserved = getStrictlyReservedRegs(MF);

  for (size_t i = 0; i < AArch64::GPR32commonRegClass.getNumRegs(); ++i) {
    if (MF.getSubtarget<AArch64Subtarget>().isXRegisterReservedForRA(i))
      markSuperRegs(Reserved, AArch64::GPR32commonRegClass.getRegister(i));
  }

  assert(checkAllSuperRegsMarked(Reserved));
  return Reserved;
}

bool AArch64RegisterInfo::isReservedReg(const MachineFunction &MF,
                                        MCRegister Reg) const {
  return getReservedRegs(MF)[Reg];
}

bool AArch64RegisterInfo::isStrictlyReservedReg(const MachineFunction &MF,
                                                MCRegister Reg) const {
  return getStrictlyReservedRegs(MF)[Reg];
}

bool AArch64RegisterInfo::isAnyArgRegReserved(
    const MachineFunction &MF) const {
  return llvm::any_of(*AArch64::GPR64argRegClass.MC, [this, &MF](MCPhysReg r) {
    return isStrictlyReservedReg(MF, r);
  });
}

void AArch64RegisterInfo::emitReservedArgRegCallError(
    const MachineFunction &MF) const {
  const Function &F = MF.getFunction();
  F.getContext().diagnose(DiagnosticInfoUnsupported{
      F, "AArch64 doesn't support function calls if any of the argument "
         "registers is reserved."});
}

bool AArch64RegisterInfo::isAsmClobberable(const MachineFunction &MF,
                                           MCRegister PhysReg) const {
  // SLH uses register X16 as the taint register but it will fall back to a
  // different method if the user clobbers it. So X16 is not reserved for
  // inline asm but is for normal codegen.
  if (MF.getFunction().hasFnAttribute(Attribute::SpeculativeLoadHardening) &&
      MCRegisterInfo::regsOverlap(PhysReg, AArch64::X16))
    return true;

  return !isReservedReg(MF, PhysReg);
}

const TargetRegisterClass *
AArch64RegisterInfo::getPointerRegClass(const MachineFunction &MF,
                                        unsigned Kind) const {
  return &AArch64::GPR64spRegClass;
}

const TargetRegisterClass *
AArch64RegisterInfo::getCrossCopyRegClass(const TargetRegisterClass *RC) const {
  if (RC == &AArch64::CCRRegClass)
    return &AArch64::GPR64RegClass; // Only MSR & MRS copy NZCV.
  return RC;
}

unsigned AArch64RegisterInfo::getBaseRegister() const { return AArch64::X19; }

bool AArch64RegisterInfo::hasBasePointer(const MachineFunction &MF) const {
  const MachineFrameInfo &MFI = MF.getFrameInfo();

  // In the presence of variable sized objects or funclets, if the fixed stack
  // size is large enough that referencing from the FP won't result in things
  // being in range relatively often, we can use a base pointer to allow access
  // from the other direction like the SP normally works.
  //
  // Furthermore, if variable sized objects are present and the stack needs to
  // be dynamically re-aligned, the base pointer is the only reliable way to
  // reference the locals.
  if (MFI.hasVarSizedObjects() || MF.hasEHFunclets()) {
    if (hasStackRealignment(MF))
      return true;

    if (MF.getSubtarget<AArch64Subtarget>().hasSVE()) {
      const AArch64FunctionInfo *AFI = MF.getInfo<AArch64FunctionInfo>();
      // Frames that have variable sized objects and scalable SVE objects
      // should always use a base pointer.
      if (!AFI->hasCalculatedStackSizeSVE() || AFI->getStackSizeSVE())
        return true;
    }

    // Conservatively estimate whether the negative offset from the frame
    // pointer will be sufficient to reach. If a function has a smallish
    // frame, it's less likely to have lots of spills and callee saved
    // space, so it's all more likely to be within range of the frame pointer.
    // If it's wrong, we'll materialize the constant and still get to the
    // object; it's just suboptimal. Negative offsets use the unscaled
    // load/store instructions, which have a 9-bit signed immediate.
    return MFI.getLocalFrameSize() >= 256;
  }

  return false;
}
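
// Illustrative scenario (source-level sketch; the function name is made up,
// and the over-aligned local forcing realignment assumes the default 16-byte
// stack alignment):
//
//   void f(unsigned n) {
//     alignas(64) int big[128];        // may force dynamic realignment
//     int *vla = (int *)alloca(n * 4); // variable sized object
//     ...
//   }
//
// Such a frame has a variable sized object and needs realignment, so
// hasBasePointer() returns true and X19 is reserved as the base pointer.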

bool AArch64RegisterInfo::isArgumentRegister(const MachineFunction &MF,
                                             MCRegister Reg) const {
  CallingConv::ID CC = MF.getFunction().getCallingConv();
  const AArch64Subtarget &STI = MF.getSubtarget<AArch64Subtarget>();
  bool IsVarArg = STI.isCallingConvWin64(MF.getFunction().getCallingConv());

  auto HasReg = [](ArrayRef<MCRegister> RegList, MCRegister Reg) {
    return llvm::is_contained(RegList, Reg);
  };

  switch (CC) {
  default:
    report_fatal_error("Unsupported calling convention.");
  case CallingConv::WebKit_JS:
    return HasReg(CC_AArch64_WebKit_JS_ArgRegs, Reg);
  case CallingConv::GHC:
    return HasReg(CC_AArch64_GHC_ArgRegs, Reg);
  case CallingConv::C:
  case CallingConv::Fast:
  case CallingConv::PreserveMost:
  case CallingConv::PreserveAll:
  case CallingConv::CXX_FAST_TLS:
  case CallingConv::Swift:
  case CallingConv::SwiftTail:
  case CallingConv::Tail:
    if (STI.isTargetWindows() && IsVarArg)
      return HasReg(CC_AArch64_Win64_VarArg_ArgRegs, Reg);
    if (!STI.isTargetDarwin()) {
      switch (CC) {
      default:
        return HasReg(CC_AArch64_AAPCS_ArgRegs, Reg);
      case CallingConv::Swift:
      case CallingConv::SwiftTail:
        return HasReg(CC_AArch64_AAPCS_ArgRegs, Reg) ||
               HasReg(CC_AArch64_AAPCS_Swift_ArgRegs, Reg);
      }
    }
    if (!IsVarArg) {
      switch (CC) {
      default:
        return HasReg(CC_AArch64_DarwinPCS_ArgRegs, Reg);
      case CallingConv::Swift:
      case CallingConv::SwiftTail:
        return HasReg(CC_AArch64_DarwinPCS_ArgRegs, Reg) ||
               HasReg(CC_AArch64_DarwinPCS_Swift_ArgRegs, Reg);
      }
    }
    if (STI.isTargetILP32())
      return HasReg(CC_AArch64_DarwinPCS_ILP32_VarArg_ArgRegs, Reg);
    return HasReg(CC_AArch64_DarwinPCS_VarArg_ArgRegs, Reg);
  case CallingConv::Win64:
    if (IsVarArg)
      return HasReg(CC_AArch64_Win64_VarArg_ArgRegs, Reg);
    return HasReg(CC_AArch64_AAPCS_ArgRegs, Reg);
  case CallingConv::CFGuard_Check:
    return HasReg(CC_AArch64_Win64_CFGuard_Check_ArgRegs, Reg);
  case CallingConv::AArch64_VectorCall:
  case CallingConv::AArch64_SVE_VectorCall:
  case CallingConv::AArch64_SME_ABI_Support_Routines_PreserveMost_From_X0:
  case CallingConv::AArch64_SME_ABI_Support_Routines_PreserveMost_From_X2:
    return HasReg(CC_AArch64_AAPCS_ArgRegs, Reg);
  }
}

Register
AArch64RegisterInfo::getFrameRegister(const MachineFunction &MF) const {
  const AArch64FrameLowering *TFI = getFrameLowering(MF);
  return TFI->hasFP(MF) ? AArch64::FP : AArch64::SP;
}

bool AArch64RegisterInfo::requiresRegisterScavenging(
    const MachineFunction &MF) const {
  return true;
}

bool AArch64RegisterInfo::requiresVirtualBaseRegisters(
    const MachineFunction &MF) const {
  return true;
}

bool
AArch64RegisterInfo::useFPForScavengingIndex(const MachineFunction &MF) const {
  // This function indicates whether the emergency spill slot should be placed
  // close to the beginning of the stackframe (closer to FP) or the end
  // (closer to SP).
  //
  // The beginning works most reliably if we have a frame pointer.
  // In the presence of any non-constant space between FP and locals
  // (e.g. in case of stack realignment or a scalable SVE area), it is
  // better to use SP or BP.
  const AArch64FrameLowering &TFI = *getFrameLowering(MF);
  const AArch64FunctionInfo *AFI = MF.getInfo<AArch64FunctionInfo>();
  assert((!MF.getSubtarget<AArch64Subtarget>().hasSVE() ||
          AFI->hasCalculatedStackSizeSVE()) &&
         "Expected SVE area to be calculated by this point");
  return TFI.hasFP(MF) && !hasStackRealignment(MF) && !AFI->getStackSizeSVE();
}

bool AArch64RegisterInfo::requiresFrameIndexScavenging(
    const MachineFunction &MF) const {
  return true;
}

bool
AArch64RegisterInfo::cannotEliminateFrame(const MachineFunction &MF) const {
  const MachineFrameInfo &MFI = MF.getFrameInfo();
  if (MF.getTarget().Options.DisableFramePointerElim(MF) && MFI.adjustsStack())
    return true;
  return MFI.hasVarSizedObjects() || MFI.isFrameAddressTaken();
}

/// needsFrameBaseReg - Returns true if the instruction's frame index
/// reference would be better served by a base register other than FP
/// or SP. Used by LocalStackFrameAllocation to determine which frame index
/// references it should create new base registers for.
bool AArch64RegisterInfo::needsFrameBaseReg(MachineInstr *MI,
                                            int64_t Offset) const {
  for (unsigned i = 0; !MI->getOperand(i).isFI(); ++i)
    assert(i < MI->getNumOperands() &&
           "Instr doesn't have FrameIndex operand!");

  // It's the load/store FI references that cause issues, as it can be
  // difficult to materialize the offset if it won't fit in the literal field.
  // Estimate based on the size of the local frame and some conservative
  // assumptions about the rest of the stack frame (note, this is pre-regalloc,
  // so we don't know everything for certain yet) whether this offset is likely
  // to be out of range of the immediate. Return true if so.

  // We only generate virtual base registers for loads and stores, so
  // return false for everything else.
  if (!MI->mayLoad() && !MI->mayStore())
    return false;

  // Without a virtual base register, if the function has variable sized
  // objects, all fixed-size local references will be via the frame pointer.
  // Approximate the offset and see if it's legal for the instruction.
  // Note that the incoming offset is based on the SP value at function entry,
  // so it'll be negative.
  MachineFunction &MF = *MI->getParent()->getParent();
  const AArch64FrameLowering *TFI = getFrameLowering(MF);
  MachineFrameInfo &MFI = MF.getFrameInfo();

  // Estimate an offset from the frame pointer.
  // Conservatively assume all GPR callee-saved registers get pushed.
  // FP, LR, X19-X28, D8-D15. 64 bits each.
  int64_t FPOffset = Offset - 16 * 20;
  // Estimate an offset from the stack pointer.
  // The incoming offset is relative to the SP at the start of the function,
  // but when we access the local it'll be relative to the SP after local
  // allocation, so adjust our SP-relative offset by that allocation size.
  Offset += MFI.getLocalFrameSize();
  // Assume that we'll have at least some spill slots allocated.
  // FIXME: This is a total SWAG number. We should run some statistics
  // and pick a real one.
  Offset += 128; // 128 bytes of spill slots

  // If there is a frame pointer, try using it.
  // The FP is only available if there is no dynamic realignment. We
  // don't know for sure yet whether we'll need that, so we guess based
  // on whether there are any local variables that would trigger it.
  if (TFI->hasFP(MF) && isFrameOffsetLegal(MI, AArch64::FP, FPOffset))
    return false;

  // If we can reference via the stack pointer or base pointer, try that.
  // FIXME: This (and the code that resolves the references) can be improved
  //        to only disallow SP relative references in the live range of
  //        the VLA(s). In practice, it's unclear how much difference that
  //        would make, but it may be worth doing.
  if (isFrameOffsetLegal(MI, AArch64::SP, Offset))
    return false;

  // If even offset 0 is illegal, we don't want a virtual base register.
  if (!isFrameOffsetLegal(MI, AArch64::SP, 0))
    return false;

  // The offset likely isn't legal; we want to allocate a virtual base
  // register.
  return true;
}
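
// Worked example for the estimate above (numbers invented for illustration):
// with an incoming SP-at-entry offset of -520 and a 256-byte local frame,
// the FP-relative guess is -520 - 320 = -840 and the SP-relative guess is
// -520 + 256 + 128 = -136. An unscaled 9-bit signed immediate covers
// [-256, 255], so the FP guess is out of range, the SP guess is in range,
// and no virtual base register is requested for this reference.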

bool AArch64RegisterInfo::isFrameOffsetLegal(const MachineInstr *MI,
                                             Register BaseReg,
                                             int64_t Offset) const {
  assert(MI && "Unable to get the legal offset for nil instruction.");
  StackOffset SaveOffset = StackOffset::getFixed(Offset);
  return isAArch64FrameOffsetLegal(*MI, SaveOffset) & AArch64FrameOffsetIsLegal;
}

/// Insert defining instruction(s) for BaseReg to be a pointer to FrameIdx
/// at the beginning of the basic block.
Register
AArch64RegisterInfo::materializeFrameBaseRegister(MachineBasicBlock *MBB,
                                                  int FrameIdx,
                                                  int64_t Offset) const {
  MachineBasicBlock::iterator Ins = MBB->begin();
  DebugLoc DL; // Defaults to "unknown".
  if (Ins != MBB->end())
    DL = Ins->getDebugLoc();
  const MachineFunction &MF = *MBB->getParent();
  const AArch64InstrInfo *TII =
      MF.getSubtarget<AArch64Subtarget>().getInstrInfo();
  const MCInstrDesc &MCID = TII->get(AArch64::ADDXri);
  MachineRegisterInfo &MRI = MBB->getParent()->getRegInfo();
  Register BaseReg = MRI.createVirtualRegister(&AArch64::GPR64spRegClass);
  MRI.constrainRegClass(BaseReg, TII->getRegClass(MCID, 0, this, MF));
  unsigned Shifter = AArch64_AM::getShifterImm(AArch64_AM::LSL, 0);

  BuildMI(*MBB, Ins, DL, MCID, BaseReg)
      .addFrameIndex(FrameIdx)
      .addImm(Offset)
      .addImm(Shifter);

  return BaseReg;
}
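
// Illustrative note: in MIR terms the instruction built above looks roughly
// like
//
//   %base:gpr64sp = ADDXri %stack.N, <Offset>, 0
//
// (N and Offset are placeholders), i.e. an ADD-immediate whose frame-index
// operand is later rewritten by frame index elimination into a real base
// register and offset.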

void AArch64RegisterInfo::resolveFrameIndex(MachineInstr &MI, Register BaseReg,
                                            int64_t Offset) const {
  // ARM doesn't need the general 64-bit offsets.
  StackOffset Off = StackOffset::getFixed(Offset);

  unsigned i = 0;
  while (!MI.getOperand(i).isFI()) {
    ++i;
    assert(i < MI.getNumOperands() && "Instr doesn't have FrameIndex operand!");
  }

  const MachineFunction *MF = MI.getParent()->getParent();
  const AArch64InstrInfo *TII =
      MF->getSubtarget<AArch64Subtarget>().getInstrInfo();
  bool Done = rewriteAArch64FrameIndex(MI, i, BaseReg, Off, TII);
  assert(Done && "Unable to resolve frame index!");
  (void)Done;
}

// Create a scratch register for the frame index elimination in an
// instruction. This function has special handling of stack tagging loop
// pseudos, in which case it can also change the instruction opcode.
static Register
createScratchRegisterForInstruction(MachineInstr &MI, unsigned FIOperandNum,
                                    const AArch64InstrInfo *TII) {
  // ST*Gloop have a reserved scratch register in operand 1. Use it, and also
  // replace the instruction with the writeback variant because it will now
  // satisfy the operand constraints for it.
  Register ScratchReg;
  if (MI.getOpcode() == AArch64::STGloop ||
      MI.getOpcode() == AArch64::STZGloop) {
    assert(FIOperandNum == 3 &&
           "Wrong frame index operand for STGloop/STZGloop");
    unsigned Op = MI.getOpcode() == AArch64::STGloop ? AArch64::STGloop_wback
                                                     : AArch64::STZGloop_wback;
    ScratchReg = MI.getOperand(1).getReg();
    MI.getOperand(3).ChangeToRegister(ScratchReg, false, false, true);
    MI.setDesc(TII->get(Op));
    MI.tieOperands(1, 3);
  } else {
    ScratchReg =
        MI.getMF()->getRegInfo().createVirtualRegister(&AArch64::GPR64RegClass);
    MI.getOperand(FIOperandNum)
        .ChangeToRegister(ScratchReg, false, false, true);
  }
  return ScratchReg;
}

void AArch64RegisterInfo::getOffsetOpcodes(
    const StackOffset &Offset, SmallVectorImpl<uint64_t> &Ops) const {
  // The smallest scalable elements supported by scaled SVE addressing modes
  // are predicates, which are 2 scalable bytes in size. So the scalable byte
  // offset must always be a multiple of 2.
  assert(Offset.getScalable() % 2 == 0 && "Invalid frame offset");

  // Add fixed-sized offset using existing DIExpression interface.
  DIExpression::appendOffset(Ops, Offset.getFixed());

  unsigned VG = getDwarfRegNum(AArch64::VG, true);
  int64_t VGSized = Offset.getScalable() / 2;
  if (VGSized > 0) {
    Ops.push_back(dwarf::DW_OP_constu);
    Ops.push_back(VGSized);
    Ops.append({dwarf::DW_OP_bregx, VG, 0ULL});
    Ops.push_back(dwarf::DW_OP_mul);
    Ops.push_back(dwarf::DW_OP_plus);
  } else if (VGSized < 0) {
    Ops.push_back(dwarf::DW_OP_constu);
    Ops.push_back(-VGSized);
    Ops.append({dwarf::DW_OP_bregx, VG, 0ULL});
    Ops.push_back(dwarf::DW_OP_mul);
    Ops.push_back(dwarf::DW_OP_minus);
  }
}
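
// Worked example (offset values invented for illustration): for a
// StackOffset of fixed = 16 and scalable = 8, the code above appends the
// fixed offset 16 and then, since VGSized = 8 / 2 = 4, emits
//
//   DW_OP_constu 4, DW_OP_bregx VG 0, DW_OP_mul, DW_OP_plus
//
// i.e. "add 4 * VG", where VG is the DWARF register holding the number of
// 64-bit granules in an SVE vector.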

bool AArch64RegisterInfo::eliminateFrameIndex(MachineBasicBlock::iterator II,
                                              int SPAdj, unsigned FIOperandNum,
                                              RegScavenger *RS) const {
  assert(SPAdj == 0 && "Unexpected");

  MachineInstr &MI = *II;
  MachineBasicBlock &MBB = *MI.getParent();
  MachineFunction &MF = *MBB.getParent();
  const MachineFrameInfo &MFI = MF.getFrameInfo();
  const AArch64InstrInfo *TII =
      MF.getSubtarget<AArch64Subtarget>().getInstrInfo();
  const AArch64FrameLowering *TFI = getFrameLowering(MF);
  int FrameIndex = MI.getOperand(FIOperandNum).getIndex();
  bool Tagged =
      MI.getOperand(FIOperandNum).getTargetFlags() & AArch64II::MO_TAGGED;
  Register FrameReg;

  // Special handling of dbg_value, stackmap, patchpoint, and statepoint
  // instructions.
  if (MI.getOpcode() == TargetOpcode::STACKMAP ||
      MI.getOpcode() == TargetOpcode::PATCHPOINT ||
      MI.getOpcode() == TargetOpcode::STATEPOINT) {
    StackOffset Offset =
        TFI->resolveFrameIndexReference(MF, FrameIndex, FrameReg,
                                        /*PreferFP=*/true,
                                        /*ForSimm=*/false);
    Offset += StackOffset::getFixed(MI.getOperand(FIOperandNum + 1).getImm());
    MI.getOperand(FIOperandNum).ChangeToRegister(FrameReg, false /*isDef*/);
    MI.getOperand(FIOperandNum + 1).ChangeToImmediate(Offset.getFixed());
    return false;
  }

  if (MI.getOpcode() == TargetOpcode::LOCAL_ESCAPE) {
    MachineOperand &FI = MI.getOperand(FIOperandNum);
    StackOffset Offset = TFI->getNonLocalFrameIndexReference(MF, FrameIndex);
    assert(!Offset.getScalable() &&
           "Frame offsets with a scalable component are not supported");
    FI.ChangeToImmediate(Offset.getFixed());
    return false;
  }

  StackOffset Offset;
  if (MI.getOpcode() == AArch64::TAGPstack) {
    // TAGPstack must use the virtual frame register in its 3rd operand.
    const AArch64FunctionInfo *AFI = MF.getInfo<AArch64FunctionInfo>();
    FrameReg = MI.getOperand(3).getReg();
    Offset = StackOffset::getFixed(MFI.getObjectOffset(FrameIndex) +
                                   AFI->getTaggedBasePointerOffset());
  } else if (Tagged) {
    StackOffset SPOffset = StackOffset::getFixed(
        MFI.getObjectOffset(FrameIndex) + (int64_t)MFI.getStackSize());
    if (MFI.hasVarSizedObjects() ||
        isAArch64FrameOffsetLegal(MI, SPOffset, nullptr, nullptr, nullptr) !=
            (AArch64FrameOffsetCanUpdate | AArch64FrameOffsetIsLegal)) {
      // Can't update to SP + offset in place. Precalculate the tagged pointer
      // in a scratch register.
      Offset = TFI->resolveFrameIndexReference(
          MF, FrameIndex, FrameReg, /*PreferFP=*/false, /*ForSimm=*/true);
      Register ScratchReg =
          MF.getRegInfo().createVirtualRegister(&AArch64::GPR64RegClass);
      emitFrameOffset(MBB, II, MI.getDebugLoc(), ScratchReg, FrameReg, Offset,
                      TII);
      BuildMI(MBB, MI, MI.getDebugLoc(), TII->get(AArch64::LDG), ScratchReg)
          .addReg(ScratchReg)
          .addReg(ScratchReg)
          .addImm(0);
      MI.getOperand(FIOperandNum)
          .ChangeToRegister(ScratchReg, false, false, true);
      return false;
    }
    FrameReg = AArch64::SP;
    Offset = StackOffset::getFixed(MFI.getObjectOffset(FrameIndex) +
                                   (int64_t)MFI.getStackSize());
  } else {
    Offset = TFI->resolveFrameIndexReference(
        MF, FrameIndex, FrameReg, /*PreferFP=*/false, /*ForSimm=*/true);
  }

  // Modify MI as necessary to handle as much of 'Offset' as possible.
  if (rewriteAArch64FrameIndex(MI, FIOperandNum, FrameReg, Offset, TII))
    return true;

  assert((!RS || !RS->isScavengingFrameIndex(FrameIndex)) &&
         "Emergency spill slot is out of reach");

  // If we get here, the immediate doesn't fit into the instruction. We folded
  // as much as possible above. Handle the rest, providing a register that is
  // SP+LargeImm.
  Register ScratchReg =
      createScratchRegisterForInstruction(MI, FIOperandNum, TII);
  emitFrameOffset(MBB, II, MI.getDebugLoc(), ScratchReg, FrameReg, Offset, TII);
  return false;
}
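
// Illustrative note on the tagged (MTE) path above: emitFrameOffset first
// materializes ScratchReg = FrameReg + Offset, then the LDG instruction
// reads the allocation tag for that address from tag memory and inserts it
// into ScratchReg, so the rewritten operand carries a correctly tagged
// pointer.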

unsigned AArch64RegisterInfo::getRegPressureLimit(const TargetRegisterClass *RC,
                                                  MachineFunction &MF) const {
  const AArch64FrameLowering *TFI = getFrameLowering(MF);

  switch (RC->getID()) {
  default:
    return 0;
  case AArch64::GPR32RegClassID:
  case AArch64::GPR32spRegClassID:
  case AArch64::GPR32allRegClassID:
  case AArch64::GPR64spRegClassID:
  case AArch64::GPR64allRegClassID:
  case AArch64::GPR64RegClassID:
  case AArch64::GPR32commonRegClassID:
  case AArch64::GPR64commonRegClassID:
    return 32 - 1                                // XZR/SP
           - (TFI->hasFP(MF) || TT.isOSDarwin()) // FP
           - MF.getSubtarget<AArch64Subtarget>().getNumXRegisterReserved()
           - hasBasePointer(MF); // X19
  case AArch64::FPR8RegClassID:
  case AArch64::FPR16RegClassID:
  case AArch64::FPR32RegClassID:
  case AArch64::FPR64RegClassID:
  case AArch64::FPR128RegClassID:
    return 32;

  case AArch64::MatrixIndexGPR32_8_11RegClassID:
  case AArch64::MatrixIndexGPR32_12_15RegClassID:
    return 4;

  case AArch64::DDRegClassID:
  case AArch64::DDDRegClassID:
  case AArch64::DDDDRegClassID:
  case AArch64::QQRegClassID:
  case AArch64::QQQRegClassID:
  case AArch64::QQQQRegClassID:
    return 32;

  case AArch64::FPR128_loRegClassID:
  case AArch64::FPR64_loRegClassID:
  case AArch64::FPR16_loRegClassID:
    return 16;
  }
}
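
// Worked example (configuration invented for illustration): on Darwin with a
// frame pointer, no -ffixed-x* reservations, and a base pointer in use, the
// GPR pressure limit above evaluates to
// 32 - 1 (XZR/SP) - 1 (FP) - 0 - 1 (X19) = 29 allocatable 64-bit GPRs.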

unsigned AArch64RegisterInfo::getLocalAddressRegister(
    const MachineFunction &MF) const {
  const auto &MFI = MF.getFrameInfo();
  if (!MF.hasEHFunclets() && !MFI.hasVarSizedObjects())
    return AArch64::SP;
  else if (hasStackRealignment(MF))
    return getBaseRegister();
  return getFrameRegister(MF);
}

/// SrcRC and DstRC will be morphed into NewRC if this returns true.
bool AArch64RegisterInfo::shouldCoalesce(
    MachineInstr *MI, const TargetRegisterClass *SrcRC, unsigned SubReg,
    const TargetRegisterClass *DstRC, unsigned DstSubReg,
    const TargetRegisterClass *NewRC, LiveIntervals &LIS) const {
  if (MI->isCopy() &&
      ((DstRC->getID() == AArch64::GPR64RegClassID) ||
       (DstRC->getID() == AArch64::GPR64commonRegClassID)) &&
      MI->getOperand(0).getSubReg() && MI->getOperand(1).getSubReg())
    // Do not coalesce in the case of a 32-bit subregister copy which
    // implements a 32-to-64-bit zero extension and relies on the upper
    // 32 bits being zeroed.
    return false;
  return true;
}