//===-- ARMBaseRegisterInfo.cpp - ARM Register Information ----------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file contains the base ARM implementation of TargetRegisterInfo class.
//
//===----------------------------------------------------------------------===//

#include "ARMBaseRegisterInfo.h"
#include "ARM.h"
#include "ARMBaseInstrInfo.h"
#include "ARMFrameLowering.h"
#include "ARMMachineFunctionInfo.h"
#include "ARMSubtarget.h"
#include "MCTargetDesc/ARMAddressingModes.h"
#include "MCTargetDesc/ARMBaseInfo.h"
#include "llvm/ADT/BitVector.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineConstantPool.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/RegisterScavenging.h"
#include "llvm/CodeGen/TargetInstrInfo.h"
#include "llvm/CodeGen/TargetRegisterInfo.h"
#include "llvm/CodeGen/VirtRegMap.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DebugLoc.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/Type.h"
#include "llvm/MC/MCInstrDesc.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetOptions.h"
#include <cassert>
#include <utility>

#define DEBUG_TYPE "arm-register-info"

#define GET_REGINFO_TARGET_DESC
#include "ARMGenRegisterInfo.inc"

using namespace llvm;

ARMBaseRegisterInfo::ARMBaseRegisterInfo()
    : ARMGenRegisterInfo(ARM::LR, 0, 0, ARM::PC) {}

static unsigned getFramePointerReg(const ARMSubtarget &STI) {
  return STI.useR7AsFramePointer() ? ARM::R7 : ARM::R11;
}

const MCPhysReg*
ARMBaseRegisterInfo::getCalleeSavedRegs(const MachineFunction *MF) const {
  const ARMSubtarget &STI = MF->getSubtarget<ARMSubtarget>();
  bool UseSplitPush = STI.splitFramePushPop(*MF);
  const MCPhysReg *RegList =
      STI.isTargetDarwin()
          ? CSR_iOS_SaveList
          : (UseSplitPush ? CSR_AAPCS_SplitPush_SaveList : CSR_AAPCS_SaveList);

  const Function &F = MF->getFunction();
  if (F.getCallingConv() == CallingConv::GHC) {
    // The GHC set of callee-saved regs is empty, as all those regs are
    // used for passing STG regs around.
    return CSR_NoRegs_SaveList;
  } else if (F.getCallingConv() == CallingConv::CFGuard_Check) {
    return CSR_Win_AAPCS_CFGuard_Check_SaveList;
  } else if (F.hasFnAttribute("interrupt")) {
    if (STI.isMClass()) {
      // M-class CPUs have hardware which saves the registers needed to allow
      // a function conforming to the AAPCS to function as a handler.
      return UseSplitPush ? CSR_AAPCS_SplitPush_SaveList : CSR_AAPCS_SaveList;
    } else if (F.getFnAttribute("interrupt").getValueAsString() == "FIQ") {
      // Fast interrupt mode gives the handler a private copy of R8-R14, so
      // fewer registers need to be saved to restore user-mode state.
      return CSR_FIQ_SaveList;
    } else {
      // Generally only R13-R14 (i.e. SP, LR) are automatically preserved by
      // exception handling.
      return CSR_GenericInt_SaveList;
    }
  }

  if (STI.getTargetLowering()->supportSwiftError() &&
      F.getAttributes().hasAttrSomewhere(Attribute::SwiftError)) {
    if (STI.isTargetDarwin())
      return CSR_iOS_SwiftError_SaveList;

    return UseSplitPush ? CSR_AAPCS_SplitPush_SwiftError_SaveList
                        : CSR_AAPCS_SwiftError_SaveList;
  }

  if (STI.isTargetDarwin() && F.getCallingConv() == CallingConv::CXX_FAST_TLS)
    return MF->getInfo<ARMFunctionInfo>()->isSplitCSR()
               ? CSR_iOS_CXX_TLS_PE_SaveList
               : CSR_iOS_CXX_TLS_SaveList;
  return RegList;
}
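
// Illustrative note (not from the upstream comments): the SwiftError save
// lists above are the corresponding default lists minus the register used
// for the swifterror parameter, so such functions simply never spill or
// restore that register in their prologue and epilogue, keeping the error
// value live in it across the call boundary.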

const MCPhysReg *ARMBaseRegisterInfo::getCalleeSavedRegsViaCopy(
    const MachineFunction *MF) const {
  assert(MF && "Invalid MachineFunction pointer.");
  if (MF->getFunction().getCallingConv() == CallingConv::CXX_FAST_TLS &&
      MF->getInfo<ARMFunctionInfo>()->isSplitCSR())
    return CSR_iOS_CXX_TLS_ViaCopy_SaveList;
  return nullptr;
}

const uint32_t *
ARMBaseRegisterInfo::getCallPreservedMask(const MachineFunction &MF,
                                          CallingConv::ID CC) const {
  const ARMSubtarget &STI = MF.getSubtarget<ARMSubtarget>();
  if (CC == CallingConv::GHC)
    // This is academic because all GHC calls are (supposed to be) tail calls.
    return CSR_NoRegs_RegMask;
  if (CC == CallingConv::CFGuard_Check)
    return CSR_Win_AAPCS_CFGuard_Check_RegMask;
  if (STI.getTargetLowering()->supportSwiftError() &&
      MF.getFunction().getAttributes().hasAttrSomewhere(Attribute::SwiftError))
    return STI.isTargetDarwin() ? CSR_iOS_SwiftError_RegMask
                                : CSR_AAPCS_SwiftError_RegMask;

  if (STI.isTargetDarwin() && CC == CallingConv::CXX_FAST_TLS)
    return CSR_iOS_CXX_TLS_RegMask;
  return STI.isTargetDarwin() ? CSR_iOS_RegMask : CSR_AAPCS_RegMask;
}

const uint32_t*
ARMBaseRegisterInfo::getNoPreservedMask() const {
  return CSR_NoRegs_RegMask;
}

const uint32_t *
ARMBaseRegisterInfo::getTLSCallPreservedMask(const MachineFunction &MF) const {
  assert(MF.getSubtarget<ARMSubtarget>().isTargetDarwin() &&
         "only know about special TLS call on Darwin");
  return CSR_iOS_TLSCall_RegMask;
}

const uint32_t *
ARMBaseRegisterInfo::getSjLjDispatchPreservedMask(
    const MachineFunction &MF) const {
  const ARMSubtarget &STI = MF.getSubtarget<ARMSubtarget>();
  if (!STI.useSoftFloat() && STI.hasVFP2Base() && !STI.isThumb1Only())
    return CSR_NoRegs_RegMask;
  else
    return CSR_FPRegs_RegMask;
}

const uint32_t *
ARMBaseRegisterInfo::getThisReturnPreservedMask(const MachineFunction &MF,
                                                CallingConv::ID CC) const {
  const ARMSubtarget &STI = MF.getSubtarget<ARMSubtarget>();
  // This should return a register mask that is the same as that returned by
  // getCallPreservedMask but that additionally preserves the register used
  // for the first i32 argument (which must also be the register used to
  // return a single i32 return value).
  //
  // If the calling convention does not use the same register for both, or
  // otherwise does not want to enable this optimization, it should return
  // nullptr.
  if (CC == CallingConv::GHC)
    // This is academic because all GHC calls are (supposed to be) tail calls.
    return nullptr;
  return STI.isTargetDarwin() ? CSR_iOS_ThisReturn_RegMask
                              : CSR_AAPCS_ThisReturn_RegMask;
}
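
// Illustrative note: under the AAPCS the first i32 argument and a single
// i32 return value both live in R0, so the ThisReturn variants are the
// normal call-preserved masks with R0 additionally marked as preserved.
// This lets a caller of a returns-this method (e.g. a call where 'this'
// carries the 'returned' attribute) keep using R0 afterwards without
// copying it to a callee-saved register.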

ArrayRef<MCPhysReg> ARMBaseRegisterInfo::getIntraCallClobberedRegs(
    const MachineFunction *MF) const {
  static const MCPhysReg IntraCallClobberedRegs[] = {ARM::R12};
  return ArrayRef<MCPhysReg>(IntraCallClobberedRegs);
}

BitVector ARMBaseRegisterInfo::
getReservedRegs(const MachineFunction &MF) const {
  const ARMSubtarget &STI = MF.getSubtarget<ARMSubtarget>();
  const ARMFrameLowering *TFI = getFrameLowering(MF);

  // FIXME: avoid re-calculating this every time.
  BitVector Reserved(getNumRegs());
  markSuperRegs(Reserved, ARM::SP);
  markSuperRegs(Reserved, ARM::PC);
  markSuperRegs(Reserved, ARM::FPSCR);
  markSuperRegs(Reserved, ARM::APSR_NZCV);
  if (TFI->hasFP(MF))
    markSuperRegs(Reserved, getFramePointerReg(STI));
  if (hasBasePointer(MF))
    markSuperRegs(Reserved, BasePtr);
  // Some targets reserve R9.
  if (STI.isR9Reserved())
    markSuperRegs(Reserved, ARM::R9);
  // Reserve D16-D31 if the subtarget doesn't support them.
  if (!STI.hasD32()) {
    static_assert(ARM::D31 == ARM::D16 + 15, "Register list not consecutive!");
    for (unsigned R = 0; R < 16; ++R)
      markSuperRegs(Reserved, ARM::D16 + R);
  }
  // Reserve any GPR pair that contains a reserved sub-register.
  const TargetRegisterClass &RC = ARM::GPRPairRegClass;
  for (unsigned Reg : RC)
    for (MCSubRegIterator SI(Reg, this); SI.isValid(); ++SI)
      if (Reserved.test(*SI))
        markSuperRegs(Reserved, Reg);
  // For the v8.1-M architecture.
  markSuperRegs(Reserved, ARM::ZR);

  assert(checkAllSuperRegsMarked(Reserved));
  return Reserved;
}

bool ARMBaseRegisterInfo::
isAsmClobberable(const MachineFunction &MF, unsigned PhysReg) const {
  return !getReservedRegs(MF).test(PhysReg);
}

const TargetRegisterClass *
ARMBaseRegisterInfo::getLargestLegalSuperClass(
    const TargetRegisterClass *RC, const MachineFunction &MF) const {
  const TargetRegisterClass *Super = RC;
  TargetRegisterClass::sc_iterator I = RC->getSuperClasses();
  do {
    switch (Super->getID()) {
    case ARM::GPRRegClassID:
    case ARM::SPRRegClassID:
    case ARM::DPRRegClassID:
    case ARM::GPRPairRegClassID:
      return Super;
    case ARM::QPRRegClassID:
    case ARM::QQPRRegClassID:
    case ARM::QQQQPRRegClassID:
      if (MF.getSubtarget<ARMSubtarget>().hasNEON())
        return Super;
    }
    Super = *I++;
  } while (Super);
  return RC;
}

const TargetRegisterClass *
ARMBaseRegisterInfo::getPointerRegClass(const MachineFunction &MF,
                                        unsigned Kind) const {
  return &ARM::GPRRegClass;
}

const TargetRegisterClass *
ARMBaseRegisterInfo::getCrossCopyRegClass(const TargetRegisterClass *RC) const {
  if (RC == &ARM::CCRRegClass)
    return &ARM::rGPRRegClass;  // Can't copy CCR registers.
  return RC;
}

unsigned
ARMBaseRegisterInfo::getRegPressureLimit(const TargetRegisterClass *RC,
                                         MachineFunction &MF) const {
  const ARMSubtarget &STI = MF.getSubtarget<ARMSubtarget>();
  const ARMFrameLowering *TFI = getFrameLowering(MF);

  switch (RC->getID()) {
  default:
    return 0;
  case ARM::tGPRRegClassID: {
    // hasFP ends up calling getMaxCallFrameSize(), which may not be
    // available when getRegPressureLimit() is called as part of
    // ScheduleDAGRRList.
    bool HasFP = MF.getFrameInfo().isMaxCallFrameSizeComputed()
                     ? TFI->hasFP(MF) : true;
    return 5 - HasFP;
  }
  case ARM::GPRRegClassID: {
    bool HasFP = MF.getFrameInfo().isMaxCallFrameSizeComputed()
                     ? TFI->hasFP(MF) : true;
    return 10 - HasFP - (STI.isR9Reserved() ? 1 : 0);
  }
  case ARM::SPRRegClassID:  // Currently not used as 'rep' register class.
  case ARM::DPRRegClassID:
    return 32 - 10;
  }
}
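
// Worked example (illustrative): for an ARM-mode function that needs a
// frame pointer, on a target where R9 is reserved, the GPR limit computed
// above is 10 - 1 - 1 = 8, i.e. the scheduler assumes only eight
// general-purpose registers are realistically available before register
// pressure forces spills.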

// Get the other register in a GPRPair.
static unsigned getPairedGPR(unsigned Reg, bool Odd, const MCRegisterInfo *RI) {
  for (MCSuperRegIterator Supers(Reg, RI); Supers.isValid(); ++Supers)
    if (ARM::GPRPairRegClass.contains(*Supers))
      return RI->getSubReg(*Supers, Odd ? ARM::gsub_1 : ARM::gsub_0);
  return 0;
}

// Resolve the RegPairEven / RegPairOdd register allocator hints.
bool
ARMBaseRegisterInfo::getRegAllocationHints(unsigned VirtReg,
                                           ArrayRef<MCPhysReg> Order,
                                           SmallVectorImpl<MCPhysReg> &Hints,
                                           const MachineFunction &MF,
                                           const VirtRegMap *VRM,
                                           const LiveRegMatrix *Matrix) const {
  const MachineRegisterInfo &MRI = MF.getRegInfo();
  std::pair<unsigned, unsigned> Hint = MRI.getRegAllocationHint(VirtReg);

  unsigned Odd;
  switch (Hint.first) {
  case ARMRI::RegPairEven:
    Odd = 0;
    break;
  case ARMRI::RegPairOdd:
    Odd = 1;
    break;
  default:
    TargetRegisterInfo::getRegAllocationHints(VirtReg, Order, Hints, MF, VRM);
    return false;
  }

  // This register should preferably be even (Odd == 0) or odd (Odd == 1).
  // Check if the other part of the pair has already been assigned, and
  // provide the paired register as the first hint.
  unsigned Paired = Hint.second;
  if (Paired == 0)
    return false;

  unsigned PairedPhys = 0;
  if (Register::isPhysicalRegister(Paired)) {
    PairedPhys = Paired;
  } else if (VRM && VRM->hasPhys(Paired)) {
    PairedPhys = getPairedGPR(VRM->getPhys(Paired), Odd, this);
  }

  // First prefer the paired physreg.
  if (PairedPhys && is_contained(Order, PairedPhys))
    Hints.push_back(PairedPhys);

  // Then prefer even or odd registers.
  for (unsigned Reg : Order) {
    if (Reg == PairedPhys || (getEncodingValue(Reg) & 1) != Odd)
      continue;
    // Don't provide hints that are paired to a reserved register.
    unsigned Paired = getPairedGPR(Reg, !Odd, this);
    if (!Paired || MRI.isReserved(Paired))
      continue;
    Hints.push_back(Reg);
  }
  return false;
}
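
// Worked example (illustrative): if a virtual register carries the
// RegPairOdd hint and its partner has already been assigned R4, walking
// R4's super-registers finds the GPRPair R4_R5, whose gsub_1 half is R5,
// so R5 becomes the first hint. The remaining odd registers from the
// allocation order follow, skipping any whose even partner is reserved.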

void
ARMBaseRegisterInfo::updateRegAllocHint(unsigned Reg, unsigned NewReg,
                                        MachineFunction &MF) const {
  MachineRegisterInfo *MRI = &MF.getRegInfo();
  std::pair<unsigned, unsigned> Hint = MRI->getRegAllocationHint(Reg);
  if ((Hint.first == (unsigned)ARMRI::RegPairOdd ||
       Hint.first == (unsigned)ARMRI::RegPairEven) &&
      Register::isVirtualRegister(Hint.second)) {
    // If 'Reg' is one of the registers of an even / odd pair and it has now
    // been changed (e.g. coalesced) into a different register, the
    // allocation hint of the other register of the pair must be updated to
    // reflect the relationship change.
    unsigned OtherReg = Hint.second;
    Hint = MRI->getRegAllocationHint(OtherReg);
    // Make sure the pair has not already divorced.
    if (Hint.second == Reg) {
      MRI->setRegAllocationHint(OtherReg, Hint.first, NewReg);
      if (Register::isVirtualRegister(NewReg))
        MRI->setRegAllocationHint(NewReg,
                                  Hint.first == (unsigned)ARMRI::RegPairOdd
                                      ? ARMRI::RegPairEven
                                      : ARMRI::RegPairOdd,
                                  OtherReg);
    }
  }
}

bool ARMBaseRegisterInfo::hasBasePointer(const MachineFunction &MF) const {
  const MachineFrameInfo &MFI = MF.getFrameInfo();
  const ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
  const ARMFrameLowering *TFI = getFrameLowering(MF);

  // If we have stack realignment and VLAs, we have no pointer to use to
  // access the stack. If we have stack realignment, and a large call frame,
  // we have no place to allocate the emergency spill slot.
  if (needsStackRealignment(MF) && !TFI->hasReservedCallFrame(MF))
    return true;

  // Thumb has trouble with negative offsets from the FP. Thumb2 has a
  // limited negative range for ldr/str (255), and Thumb1 allows positive
  // offsets only.
  //
  // It's going to be better to use the SP or Base Pointer instead. When
  // there are variable sized objects, we can't reference off of the SP, so
  // we reserve a Base Pointer.
  //
  // For Thumb2, estimate whether a negative offset from the frame pointer
  // will be sufficient to reach the whole stack frame. If a function has a
  // smallish frame, it's less likely to have lots of spills and callee saved
  // space, so it's all more likely to be within range of the frame pointer.
  // If the estimate is wrong, the scavenger will still make the access work;
  // it just won't be optimal. (We should always be able to reach the
  // emergency spill slot from the frame pointer.)
  if (AFI->isThumb2Function() && MFI.hasVarSizedObjects() &&
      MFI.getLocalFrameSize() >= 128)
    return true;
  // For Thumb1, if sp moves, nothing is in range, so force a base pointer.
  // This is necessary for correctness in cases where we need an emergency
  // spill slot. (In Thumb1, we can't use a negative offset from the frame
  // pointer.)
  if (AFI->isThumb1OnlyFunction() && !TFI->hasReservedCallFrame(MF))
    return true;
  return false;
}

bool ARMBaseRegisterInfo::canRealignStack(const MachineFunction &MF) const {
  const MachineRegisterInfo *MRI = &MF.getRegInfo();
  const ARMFrameLowering *TFI = getFrameLowering(MF);
  // We can't realign the stack if:
  // 1. Dynamic stack realignment is explicitly disabled,
  // 2. There are VLAs in the function and the base pointer is disabled.
  if (!TargetRegisterInfo::canRealignStack(MF))
    return false;
  // Stack realignment requires a frame pointer. If we already started
  // register allocation with frame pointer elimination, it is too late now.
  if (!MRI->canReserveReg(getFramePointerReg(MF.getSubtarget<ARMSubtarget>())))
    return false;
  // We may also need a base pointer if there are dynamic allocas or stack
  // pointer adjustments around calls.
  if (TFI->hasReservedCallFrame(MF))
    return true;
  // A base pointer is required and allowed. Check that it isn't too late to
  // reserve it.
  return MRI->canReserveReg(BasePtr);
}
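
// Note (assumption: BasePtr is R6, as set in ARMBaseRegisterInfo.h): a
// Thumb2 function that uses alloca and keeps at least 128 bytes of locals
// therefore reserves the base pointer register in getReservedRegs() above
// and addresses its fixed-size locals relative to it rather than SP or FP.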

bool ARMBaseRegisterInfo::
cannotEliminateFrame(const MachineFunction &MF) const {
  const MachineFrameInfo &MFI = MF.getFrameInfo();
  if (MF.getTarget().Options.DisableFramePointerElim(MF) && MFI.adjustsStack())
    return true;
  return MFI.hasVarSizedObjects() || MFI.isFrameAddressTaken()
      || needsStackRealignment(MF);
}

Register
ARMBaseRegisterInfo::getFrameRegister(const MachineFunction &MF) const {
  const ARMSubtarget &STI = MF.getSubtarget<ARMSubtarget>();
  const ARMFrameLowering *TFI = getFrameLowering(MF);

  if (TFI->hasFP(MF))
    return getFramePointerReg(STI);
  return ARM::SP;
}

/// emitLoadConstPool - Emits a load from constpool to materialize the
/// specified immediate.
void ARMBaseRegisterInfo::emitLoadConstPool(
    MachineBasicBlock &MBB, MachineBasicBlock::iterator &MBBI,
    const DebugLoc &dl, unsigned DestReg, unsigned SubIdx, int Val,
    ARMCC::CondCodes Pred, unsigned PredReg, unsigned MIFlags) const {
  MachineFunction &MF = *MBB.getParent();
  const TargetInstrInfo &TII = *MF.getSubtarget().getInstrInfo();
  MachineConstantPool *ConstantPool = MF.getConstantPool();
  const Constant *C =
      ConstantInt::get(Type::getInt32Ty(MF.getFunction().getContext()), Val);
  unsigned Idx = ConstantPool->getConstantPoolIndex(C, 4);

  BuildMI(MBB, MBBI, dl, TII.get(ARM::LDRcp))
      .addReg(DestReg, getDefRegState(true), SubIdx)
      .addConstantPoolIndex(Idx)
      .addImm(0)
      .add(predOps(Pred, PredReg))
      .setMIFlags(MIFlags);
}

bool ARMBaseRegisterInfo::
requiresRegisterScavenging(const MachineFunction &MF) const {
  return true;
}

bool ARMBaseRegisterInfo::
trackLivenessAfterRegAlloc(const MachineFunction &MF) const {
  return true;
}

bool ARMBaseRegisterInfo::
requiresFrameIndexScavenging(const MachineFunction &MF) const {
  return true;
}

bool ARMBaseRegisterInfo::
requiresVirtualBaseRegisters(const MachineFunction &MF) const {
  return true;
}

int64_t ARMBaseRegisterInfo::
getFrameIndexInstrOffset(const MachineInstr *MI, int Idx) const {
  const MCInstrDesc &Desc = MI->getDesc();
  unsigned AddrMode = (Desc.TSFlags & ARMII::AddrModeMask);
  int64_t InstrOffs = 0;
  int Scale = 1;
  unsigned ImmIdx = 0;
  switch (AddrMode) {
  case ARMII::AddrModeT2_i8:
  case ARMII::AddrModeT2_i12:
  case ARMII::AddrMode_i12:
    InstrOffs = MI->getOperand(Idx+1).getImm();
    Scale = 1;
    break;
  case ARMII::AddrMode5: {
    // VFP address mode.
    const MachineOperand &OffOp = MI->getOperand(Idx+1);
    InstrOffs = ARM_AM::getAM5Offset(OffOp.getImm());
    if (ARM_AM::getAM5Op(OffOp.getImm()) == ARM_AM::sub)
      InstrOffs = -InstrOffs;
    Scale = 4;
    break;
  }
  case ARMII::AddrMode2:
    ImmIdx = Idx+2;
    InstrOffs = ARM_AM::getAM2Offset(MI->getOperand(ImmIdx).getImm());
    if (ARM_AM::getAM2Op(MI->getOperand(ImmIdx).getImm()) == ARM_AM::sub)
      InstrOffs = -InstrOffs;
    break;
  case ARMII::AddrMode3:
    ImmIdx = Idx+2;
    InstrOffs = ARM_AM::getAM3Offset(MI->getOperand(ImmIdx).getImm());
    if (ARM_AM::getAM3Op(MI->getOperand(ImmIdx).getImm()) == ARM_AM::sub)
      InstrOffs = -InstrOffs;
    break;
  case ARMII::AddrModeT1_s:
    ImmIdx = Idx+1;
    InstrOffs = MI->getOperand(ImmIdx).getImm();
    Scale = 4;
    break;
  default:
    llvm_unreachable("Unsupported addressing mode!");
  }

  return InstrOffs * Scale;
}
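
// Worked example (illustrative): a VLDRD uses AddrMode5, whose immediate
// operand packs an add/sub flag together with a count of 4-byte words.
// For an operand encoding "sub, 2 words", getAM5Offset() returns 2, the
// sub flag negates it, and the Scale of 4 yields a byte offset of -8.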

/// needsFrameBaseReg - Returns true if the instruction's frame index
/// reference would be better served by a base register other than FP
/// or SP. Used by LocalStackFrameAllocation to determine which frame index
/// references it should create new base registers for.
bool ARMBaseRegisterInfo::
needsFrameBaseReg(MachineInstr *MI, int64_t Offset) const {
  for (unsigned i = 0; !MI->getOperand(i).isFI(); ++i) {
    assert(i < MI->getNumOperands() &&
           "Instr doesn't have FrameIndex operand!");
  }

  // It's the load/store FI references that cause issues, as it can be
  // difficult to materialize the offset if it won't fit in the literal
  // field. Estimate, based on the size of the local frame and some
  // conservative assumptions about the rest of the stack frame (note, this
  // is pre-regalloc, so we don't know everything for certain yet), whether
  // this offset is likely to be out of range of the immediate. Return true
  // if so.

  // We only generate virtual base registers for loads and stores, so
  // return false for everything else.
  unsigned Opc = MI->getOpcode();
  switch (Opc) {
  case ARM::LDRi12:   case ARM::LDRH:   case ARM::LDRBi12:
  case ARM::STRi12:   case ARM::STRH:   case ARM::STRBi12:
  case ARM::t2LDRi12: case ARM::t2LDRi8:
  case ARM::t2STRi12: case ARM::t2STRi8:
  case ARM::VLDRS:    case ARM::VLDRD:
  case ARM::VSTRS:    case ARM::VSTRD:
  case ARM::tSTRspi:  case ARM::tLDRspi:
    break;
  default:
    return false;
  }

  // Without a virtual base register, if the function has variable sized
  // objects, all fixed-size local references will be via the frame pointer.
  // Approximate the offset and see if it's legal for the instruction.
  // Note that the incoming offset is based on the SP value at function
  // entry, so it'll be negative.
  MachineFunction &MF = *MI->getParent()->getParent();
  const ARMFrameLowering *TFI = getFrameLowering(MF);
  MachineFrameInfo &MFI = MF.getFrameInfo();
  ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();

  // Estimate an offset from the frame pointer.
  // Conservatively assume all callee-saved registers get pushed. R4-R6
  // will be earlier than the FP, so we ignore those.
  // R7, LR
  int64_t FPOffset = Offset - 8;
  // ARM and Thumb2 functions also need to consider R8-R11 and D8-D15.
  if (!AFI->isThumbFunction() || !AFI->isThumb1OnlyFunction())
    FPOffset -= 80;
  // Estimate an offset from the stack pointer.
  // The incoming offset is relative to the SP at the start of the function,
  // but when we access the local it'll be relative to the SP after local
  // allocation, so adjust our SP-relative offset by that allocation size.
  Offset += MFI.getLocalFrameSize();
  // Assume that we'll have at least some spill slots allocated.
  // FIXME: This is a total SWAG number. We should run some statistics
  // and pick a real one.
  Offset += 128;  // 128 bytes of spill slots

  // If there's a frame pointer and the addressing mode allows it, try using
  // it. The FP is only available if there is no dynamic realignment. We
  // don't know for sure yet whether we'll need that, so we guess based
  // on whether there are any local variables that would trigger it.
  unsigned StackAlign = TFI->getStackAlignment();
  if (TFI->hasFP(MF) &&
      !((MFI.getLocalFrameMaxAlign() > StackAlign) && canRealignStack(MF))) {
    if (isFrameOffsetLegal(MI, getFrameRegister(MF), FPOffset))
      return false;
  }
  // If we can reference via the stack pointer, try that.
  // FIXME: This (and the code that resolves the references) can be improved
  //        to only disallow SP relative references in the live range of
  //        the VLA(s). In practice, it's unclear how much difference that
  //        would make, but it may be worth doing.
  if (!MFI.hasVarSizedObjects() && isFrameOffsetLegal(MI, ARM::SP, Offset))
    return false;

  // The offset likely isn't legal; we want to allocate a virtual base
  // register.
  return true;
}
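
// Worked arithmetic (illustrative): for an incoming SP-relative offset of
// -1000 in an ARM-mode function, the FP-relative estimate above is
// -1000 - 8 - 80 = -1088. An LDRi12 (12-bit immediate, up to 4095 bytes)
// can still encode that, so no virtual base register is needed; a t2LDRi8,
// whose negative range stops at -255, could not, and would get one.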

/// materializeFrameBaseRegister - Insert defining instruction(s) for BaseReg
/// to be a pointer to FrameIdx at the beginning of the basic block.
void ARMBaseRegisterInfo::
materializeFrameBaseRegister(MachineBasicBlock *MBB,
                             unsigned BaseReg, int FrameIdx,
                             int64_t Offset) const {
  ARMFunctionInfo *AFI = MBB->getParent()->getInfo<ARMFunctionInfo>();
  unsigned ADDriOpc = !AFI->isThumbFunction() ? ARM::ADDri :
      (AFI->isThumb1OnlyFunction() ? ARM::tADDframe : ARM::t2ADDri);

  MachineBasicBlock::iterator Ins = MBB->begin();
  DebugLoc DL;  // Defaults to "unknown".
  if (Ins != MBB->end())
    DL = Ins->getDebugLoc();

  const MachineFunction &MF = *MBB->getParent();
  MachineRegisterInfo &MRI = MBB->getParent()->getRegInfo();
  const TargetInstrInfo &TII = *MF.getSubtarget().getInstrInfo();
  const MCInstrDesc &MCID = TII.get(ADDriOpc);
  MRI.constrainRegClass(BaseReg, TII.getRegClass(MCID, 0, this, MF));

  MachineInstrBuilder MIB = BuildMI(*MBB, Ins, DL, MCID, BaseReg)
      .addFrameIndex(FrameIdx).addImm(Offset);

  if (!AFI->isThumb1OnlyFunction())
    MIB.add(predOps(ARMCC::AL)).add(condCodeOp());
}
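
// Note (illustrative): for a Thumb2 function this emits an
// always-predicated t2ADDri of the frame index at the top of the entry
// block; the frame-index operand itself is resolved later, once frame
// layout is final, by the usual frame-index elimination.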

void ARMBaseRegisterInfo::resolveFrameIndex(MachineInstr &MI, unsigned BaseReg,
                                            int64_t Offset) const {
  MachineBasicBlock &MBB = *MI.getParent();
  MachineFunction &MF = *MBB.getParent();
  const ARMBaseInstrInfo &TII =
      *static_cast<const ARMBaseInstrInfo *>(MF.getSubtarget().getInstrInfo());
  ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
  int Off = Offset;  // ARM doesn't need the general 64-bit offsets.
  unsigned i = 0;

  assert(!AFI->isThumb1OnlyFunction() &&
         "This resolveFrameIndex does not support Thumb1!");

  while (!MI.getOperand(i).isFI()) {
    ++i;
    assert(i < MI.getNumOperands() && "Instr doesn't have FrameIndex operand!");
  }
  bool Done = false;
  if (!AFI->isThumbFunction())
    Done = rewriteARMFrameIndex(MI, i, BaseReg, Off, TII);
  else {
    assert(AFI->isThumb2Function());
    Done = rewriteT2FrameIndex(MI, i, BaseReg, Off, TII, this);
  }
  assert(Done && "Unable to resolve frame index!");
  (void)Done;
}

bool ARMBaseRegisterInfo::isFrameOffsetLegal(const MachineInstr *MI,
                                             unsigned BaseReg,
                                             int64_t Offset) const {
  const MCInstrDesc &Desc = MI->getDesc();
  unsigned AddrMode = (Desc.TSFlags & ARMII::AddrModeMask);
  unsigned i = 0;
  for (; !MI->getOperand(i).isFI(); ++i)
    assert(i+1 < MI->getNumOperands() &&
           "Instr doesn't have FrameIndex operand!");

  // AddrMode4 and AddrMode6 cannot handle any offset.
  if (AddrMode == ARMII::AddrMode4 || AddrMode == ARMII::AddrMode6)
    return Offset == 0;

  unsigned NumBits = 0;
  unsigned Scale = 1;
  bool isSigned = true;
  switch (AddrMode) {
  case ARMII::AddrModeT2_i8:
  case ARMII::AddrModeT2_i12:
    // i8 supports only negative offsets and i12 supports only positive
    // ones, so pick the form that matches the sign of Offset.
    Scale = 1;
    if (Offset < 0) {
      NumBits = 8;
      Offset = -Offset;
    } else {
      NumBits = 12;
    }
    break;
  case ARMII::AddrMode5:
    // VFP address mode.
    NumBits = 8;
    Scale = 4;
    break;
  case ARMII::AddrMode_i12:
  case ARMII::AddrMode2:
    NumBits = 12;
    break;
  case ARMII::AddrMode3:
    NumBits = 8;
    break;
  case ARMII::AddrModeT1_s:
    NumBits = (BaseReg == ARM::SP ? 8 : 5);
    Scale = 4;
    isSigned = false;
    break;
  default:
    llvm_unreachable("Unsupported addressing mode!");
  }

  Offset += getFrameIndexInstrOffset(MI, i);
  // Make sure the offset is encodable for instructions that scale the
  // immediate.
  if ((Offset & (Scale-1)) != 0)
    return false;

  if (isSigned && Offset < 0)
    Offset = -Offset;

  unsigned Mask = (1 << NumBits) - 1;
  if ((unsigned)Offset <= Mask * Scale)
    return true;

  return false;
}
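
// Encodable ranges implied by the table above: AddrMode5 (VLDR/VSTR)
// reaches multiples of 4 up to +/-1020 bytes (8 bits scaled by 4);
// AddrModeT1_s off SP reaches 0..1020 in steps of 4; a Thumb2 load/store
// uses the 12-bit form for positive offsets up to 4095 bytes but only the
// 8-bit magnitude, down to -255, for negative ones.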

void
ARMBaseRegisterInfo::eliminateFrameIndex(MachineBasicBlock::iterator II,
                                         int SPAdj, unsigned FIOperandNum,
                                         RegScavenger *RS) const {
  MachineInstr &MI = *II;
  MachineBasicBlock &MBB = *MI.getParent();
  MachineFunction &MF = *MBB.getParent();
  const ARMBaseInstrInfo &TII =
      *static_cast<const ARMBaseInstrInfo *>(MF.getSubtarget().getInstrInfo());
  const ARMFrameLowering *TFI = getFrameLowering(MF);
  ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
  assert(!AFI->isThumb1OnlyFunction() &&
         "This eliminateFrameIndex does not support Thumb1!");
  int FrameIndex = MI.getOperand(FIOperandNum).getIndex();
  unsigned FrameReg;

  int Offset = TFI->ResolveFrameIndexReference(MF, FrameIndex, FrameReg, SPAdj);

  // PEI::scavengeFrameVirtualRegs() cannot accurately track SPAdj because the
  // call frame setup/destroy instructions have already been eliminated. That
  // means the stack pointer cannot be used to access the emergency spill slot
  // when !hasReservedCallFrame().
#ifndef NDEBUG
  if (RS && FrameReg == ARM::SP && RS->isScavengingFrameIndex(FrameIndex)) {
    assert(TFI->hasReservedCallFrame(MF) &&
           "Cannot use SP to access the emergency spill slot in "
           "functions without a reserved call frame");
    assert(!MF.getFrameInfo().hasVarSizedObjects() &&
           "Cannot use SP to access the emergency spill slot in "
           "functions with variable sized frame objects");
  }
#endif // NDEBUG

  assert(!MI.isDebugValue() &&
         "DBG_VALUEs should be handled in target-independent code");

  // Modify MI as necessary to handle as much of 'Offset' as possible.
  bool Done = false;
  if (!AFI->isThumbFunction())
    Done = rewriteARMFrameIndex(MI, FIOperandNum, FrameReg, Offset, TII);
  else {
    assert(AFI->isThumb2Function());
    Done = rewriteT2FrameIndex(MI, FIOperandNum, FrameReg, Offset, TII, this);
  }
  if (Done)
    return;

  // If we get here, the immediate doesn't fit into the instruction. We
  // folded as much as possible above; handle the rest by providing a
  // register that holds SP+LargeImm.
  assert(
      (Offset ||
       (MI.getDesc().TSFlags & ARMII::AddrModeMask) == ARMII::AddrMode4 ||
       (MI.getDesc().TSFlags & ARMII::AddrModeMask) == ARMII::AddrMode6 ||
       (MI.getDesc().TSFlags & ARMII::AddrModeMask) == ARMII::AddrModeT2_i7 ||
       (MI.getDesc().TSFlags & ARMII::AddrModeMask) == ARMII::AddrModeT2_i7s2 ||
       (MI.getDesc().TSFlags & ARMII::AddrModeMask) ==
           ARMII::AddrModeT2_i7s4) &&
      "This code isn't needed if offset already handled!");

  unsigned ScratchReg = 0;
  int PIdx = MI.findFirstPredOperandIdx();
  ARMCC::CondCodes Pred = (PIdx == -1)
      ? ARMCC::AL : (ARMCC::CondCodes)MI.getOperand(PIdx).getImm();
  Register PredReg = (PIdx == -1) ? Register() : MI.getOperand(PIdx+1).getReg();

  const MCInstrDesc &MCID = MI.getDesc();
  const TargetRegisterClass *RegClass =
      TII.getRegClass(MCID, FIOperandNum, this, *MI.getParent()->getParent());

  if (Offset == 0 &&
      (Register::isVirtualRegister(FrameReg) || RegClass->contains(FrameReg)))
    // Must be addrmode4/6.
    MI.getOperand(FIOperandNum).ChangeToRegister(FrameReg, false, false, false);
  else {
    ScratchReg = MF.getRegInfo().createVirtualRegister(RegClass);
    if (!AFI->isThumbFunction())
      emitARMRegPlusImmediate(MBB, II, MI.getDebugLoc(), ScratchReg, FrameReg,
                              Offset, Pred, PredReg, TII);
    else {
      assert(AFI->isThumb2Function());
      emitT2RegPlusImmediate(MBB, II, MI.getDebugLoc(), ScratchReg, FrameReg,
                             Offset, Pred, PredReg, TII);
    }
    // Update the original instruction to use the scratch register.
    MI.getOperand(FIOperandNum).ChangeToRegister(ScratchReg, false, false,
                                                 true);
  }
}
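
// Worked example (illustrative): a Thumb2 store to a local at FP-300
// cannot be fully folded (t2STRi8 negative offsets only reach -255), so
// the code above materializes FrameReg plus the leftover offset into a
// virtual scratch register with emitT2RegPlusImmediate and rewrites the
// store's base operand to use that scratch register.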

bool ARMBaseRegisterInfo::shouldCoalesce(MachineInstr *MI,
                                         const TargetRegisterClass *SrcRC,
                                         unsigned SubReg,
                                         const TargetRegisterClass *DstRC,
                                         unsigned DstSubReg,
                                         const TargetRegisterClass *NewRC,
                                         LiveIntervals &LIS) const {
  auto MBB = MI->getParent();
  auto MF = MBB->getParent();
  const MachineRegisterInfo &MRI = MF->getRegInfo();
  // If we are not copying into a sub-register, this should be OK because we
  // shouldn't need to split the reg.
  if (!DstSubReg)
    return true;
  // Small registers don't frequently cause a problem, so we can coalesce
  // them.
  if (getRegSizeInBits(*NewRC) < 256 && getRegSizeInBits(*DstRC) < 256 &&
      getRegSizeInBits(*SrcRC) < 256)
    return true;

  auto NewRCWeight =
      MRI.getTargetRegisterInfo()->getRegClassWeight(NewRC);
  auto SrcRCWeight =
      MRI.getTargetRegisterInfo()->getRegClassWeight(SrcRC);
  auto DstRCWeight =
      MRI.getTargetRegisterInfo()->getRegClassWeight(DstRC);
  // If the source register class is more expensive than the destination,
  // the coalescing is probably profitable.
  if (SrcRCWeight.RegWeight > NewRCWeight.RegWeight)
    return true;
  if (DstRCWeight.RegWeight > NewRCWeight.RegWeight)
    return true;

  // If the register allocator isn't constrained, we can always allow
  // coalescing; unfortunately, we don't know yet whether we will be
  // constrained. The goal of this heuristic is to restrict how many
  // expensive registers we allow to coalesce in a given basic block.
  auto AFI = MF->getInfo<ARMFunctionInfo>();
  auto It = AFI->getCoalescedWeight(MBB);

  LLVM_DEBUG(dbgs() << "\tARM::shouldCoalesce - Coalesced Weight: "
                    << It->second << "\n");
  LLVM_DEBUG(dbgs() << "\tARM::shouldCoalesce - Reg Weight: "
                    << NewRCWeight.RegWeight << "\n");

  // This number is the largest round number that meets the following
  // criteria:
  //   (1) addresses PR18825
  //   (2) generates better code in some test cases (like vldm-sched-a9.ll)
  //   (3) doesn't regress any test cases (in-tree, test-suite, and SPEC)
  // In practice the SizeMultiplier will only factor in for straight line
  // code that uses a lot of NEON vectors, which isn't terribly common.
  unsigned SizeMultiplier = MBB->size()/100;
  SizeMultiplier = SizeMultiplier ? SizeMultiplier : 1;
  if (It->second < NewRCWeight.WeightLimit * SizeMultiplier) {
    It->second += NewRCWeight.RegWeight;
    return true;
  }
  return false;
}