1 //===-- SystemZRegisterInfo.cpp - SystemZ register information ------------===// 2 // 3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. 4 // See https://llvm.org/LICENSE.txt for license information. 5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception 6 // 7 //===----------------------------------------------------------------------===// 8 9 #include "SystemZRegisterInfo.h" 10 #include "SystemZInstrInfo.h" 11 #include "SystemZSubtarget.h" 12 #include "llvm/CodeGen/LiveIntervals.h" 13 #include "llvm/ADT/SmallSet.h" 14 #include "llvm/CodeGen/MachineInstrBuilder.h" 15 #include "llvm/CodeGen/MachineRegisterInfo.h" 16 #include "llvm/CodeGen/TargetFrameLowering.h" 17 #include "llvm/CodeGen/VirtRegMap.h" 18 19 using namespace llvm; 20 21 #define GET_REGINFO_TARGET_DESC 22 #include "SystemZGenRegisterInfo.inc" 23 24 SystemZRegisterInfo::SystemZRegisterInfo() 25 : SystemZGenRegisterInfo(SystemZ::R14D) {} 26 27 // Given that MO is a GRX32 operand, return either GR32 or GRH32 if MO 28 // somehow belongs in it. Otherwise, return GRX32. 
29 static const TargetRegisterClass *getRC32(MachineOperand &MO, 30 const VirtRegMap *VRM, 31 const MachineRegisterInfo *MRI) { 32 const TargetRegisterClass *RC = MRI->getRegClass(MO.getReg()); 33 34 if (SystemZ::GR32BitRegClass.hasSubClassEq(RC) || 35 MO.getSubReg() == SystemZ::subreg_l32 || 36 MO.getSubReg() == SystemZ::subreg_hl32) 37 return &SystemZ::GR32BitRegClass; 38 if (SystemZ::GRH32BitRegClass.hasSubClassEq(RC) || 39 MO.getSubReg() == SystemZ::subreg_h32 || 40 MO.getSubReg() == SystemZ::subreg_hh32) 41 return &SystemZ::GRH32BitRegClass; 42 43 if (VRM && VRM->hasPhys(MO.getReg())) { 44 Register PhysReg = VRM->getPhys(MO.getReg()); 45 if (SystemZ::GR32BitRegClass.contains(PhysReg)) 46 return &SystemZ::GR32BitRegClass; 47 assert (SystemZ::GRH32BitRegClass.contains(PhysReg) && 48 "Phys reg not in GR32 or GRH32?"); 49 return &SystemZ::GRH32BitRegClass; 50 } 51 52 assert (RC == &SystemZ::GRX32BitRegClass); 53 return RC; 54 } 55 56 // Pass the registers of RC as hints while making sure that if any of these 57 // registers are copy hints (and therefore already in Hints), hint them 58 // first. 
static void addHints(ArrayRef<MCPhysReg> Order,
                     SmallVectorImpl<MCPhysReg> &Hints,
                     const TargetRegisterClass *RC,
                     const MachineRegisterInfo *MRI) {
  // Remember the incoming (copy) hints, then rebuild Hints from scratch.
  SmallSet<unsigned, 4> CopyHints;
  CopyHints.insert(Hints.begin(), Hints.end());
  Hints.clear();
  // First pass: registers of RC that were already copy hints.
  for (MCPhysReg Reg : Order)
    if (CopyHints.count(Reg) &&
        RC->contains(Reg) && !MRI->isReserved(Reg))
      Hints.push_back(Reg);
  // Second pass: the remaining allocatable registers of RC, in Order.
  for (MCPhysReg Reg : Order)
    if (!CopyHints.count(Reg) &&
        RC->contains(Reg) && !MRI->isReserved(Reg))
      Hints.push_back(Reg);
}

// Provide SystemZ-specific allocation hints for VirtReg on top of the
// target-independent copy hints:
//  - hint the tied/commutable operand registers of two-address instructions;
//  - for a GRX32 virtual register used by LOCRMux/SELRMux or compared
//    against zero by CHIMux/CFIMux, hint (or require) the GR32/GRH32
//    subclass that avoids an expensive expansion.
// Returns true when the hints are the only registers RA should consider.
bool SystemZRegisterInfo::getRegAllocationHints(
    Register VirtReg, ArrayRef<MCPhysReg> Order,
    SmallVectorImpl<MCPhysReg> &Hints, const MachineFunction &MF,
    const VirtRegMap *VRM, const LiveRegMatrix *Matrix) const {
  const MachineRegisterInfo *MRI = &MF.getRegInfo();
  const SystemZSubtarget &Subtarget = MF.getSubtarget<SystemZSubtarget>();
  const TargetRegisterInfo *TRI = Subtarget.getRegisterInfo();

  bool BaseImplRetVal = TargetRegisterInfo::getRegAllocationHints(
      VirtReg, Order, Hints, MF, VRM, Matrix);

  if (VRM != nullptr) {
    // Add any two address hints after any copy hints.
    SmallSet<unsigned, 4> TwoAddrHints;
    for (auto &Use : MRI->reg_nodbg_instructions(VirtReg))
      if (SystemZ::getTwoOperandOpcode(Use.getOpcode()) != -1) {
        // Identify which operand of the (potentially) two-address
        // instruction is VirtReg, which operand it would be tied to, and
        // (for commutable instructions) the alternative operand.
        const MachineOperand *VRRegMO = nullptr;
        const MachineOperand *OtherMO = nullptr;
        const MachineOperand *CommuMO = nullptr;
        if (VirtReg == Use.getOperand(0).getReg()) {
          VRRegMO = &Use.getOperand(0);
          OtherMO = &Use.getOperand(1);
          if (Use.isCommutable())
            CommuMO = &Use.getOperand(2);
        } else if (VirtReg == Use.getOperand(1).getReg()) {
          VRRegMO = &Use.getOperand(1);
          OtherMO = &Use.getOperand(0);
        } else if (VirtReg == Use.getOperand(2).getReg() &&
                   Use.isCommutable()) {
          VRRegMO = &Use.getOperand(2);
          OtherMO = &Use.getOperand(0);
        } else
          continue;

        // Translate MO into a physreg hint for VirtReg, adjusting for any
        // subregister indices on either operand, and record it unless it
        // is reserved or already a copy hint.
        auto tryAddHint = [&](const MachineOperand *MO) -> void {
          Register Reg = MO->getReg();
          Register PhysReg = Register::isPhysicalRegister(Reg)
                                 ? Reg
                                 : Register(VRM->getPhys(Reg));
          if (PhysReg) {
            if (MO->getSubReg())
              PhysReg = getSubReg(PhysReg, MO->getSubReg());
            if (VRRegMO->getSubReg())
              PhysReg = getMatchingSuperReg(PhysReg, VRRegMO->getSubReg(),
                                            MRI->getRegClass(VirtReg));
            if (!MRI->isReserved(PhysReg) && !is_contained(Hints, PhysReg))
              TwoAddrHints.insert(PhysReg);
          }
        };
        tryAddHint(OtherMO);
        if (CommuMO)
          tryAddHint(CommuMO);
      }
    // Append the collected two-address hints in allocation order.
    for (MCPhysReg OrderReg : Order)
      if (TwoAddrHints.count(OrderReg))
        Hints.push_back(OrderReg);
  }

  if (MRI->getRegClass(VirtReg) == &SystemZ::GRX32BitRegClass) {
    // Walk VirtReg and any GRX32 registers connected to it through
    // LOCRMux/SELRMux operands, looking for a constraint to GR32 or GRH32.
    SmallVector<Register, 8> Worklist;
    SmallSet<Register, 4> DoneRegs;
    Worklist.push_back(VirtReg);
    while (Worklist.size()) {
      Register Reg = Worklist.pop_back_val();
      if (!DoneRegs.insert(Reg).second)
        continue;

      for (auto &Use : MRI->reg_instructions(Reg)) {
        // For LOCRMux, see if the other operand is already a high or low
        // register, and in that case give the corresponding hints for
        // VirtReg. LOCR instructions need both operands in either high or
        // low parts. Same handling for SELRMux.
        if (Use.getOpcode() == SystemZ::LOCRMux ||
            Use.getOpcode() == SystemZ::SELRMux) {
          MachineOperand &TrueMO = Use.getOperand(1);
          MachineOperand &FalseMO = Use.getOperand(2);
          // The common subclass of all operands (GR32, GRH32, GRX32 or
          // null) tells us whether both sides are pinned to the same half.
          const TargetRegisterClass *RC =
              TRI->getCommonSubClass(getRC32(FalseMO, VRM, MRI),
                                     getRC32(TrueMO, VRM, MRI));
          if (Use.getOpcode() == SystemZ::SELRMux)
            RC = TRI->getCommonSubClass(RC,
                                        getRC32(Use.getOperand(0), VRM, MRI));
          if (RC && RC != &SystemZ::GRX32BitRegClass) {
            addHints(Order, Hints, RC, MRI);
            // Return true to make these hints the only regs available to
            // RA. This may mean extra spilling but since the alternative is
            // a jump sequence expansion of the LOCRMux, it is preferred.
            return true;
          }

          // Add the other operand of the LOCRMux to the worklist.
          Register OtherReg =
              (TrueMO.getReg() == Reg ? FalseMO.getReg() : TrueMO.getReg());
          if (MRI->getRegClass(OtherReg) == &SystemZ::GRX32BitRegClass)
            Worklist.push_back(OtherReg);
        } // end LOCRMux
        else if (Use.getOpcode() == SystemZ::CHIMux ||
                 Use.getOpcode() == SystemZ::CFIMux) {
          // A compare against zero whose value is only defined by LMux
          // loads is better off in a low (GR32) register.
          if (Use.getOperand(1).getImm() == 0) {
            bool OnlyLMuxes = true;
            for (MachineInstr &DefMI : MRI->def_instructions(VirtReg))
              if (DefMI.getOpcode() != SystemZ::LMux)
                OnlyLMuxes = false;
            if (OnlyLMuxes) {
              addHints(Order, Hints, &SystemZ::GR32BitRegClass, MRI);
              // Return false to make these hints preferred but not obligatory.
              return false;
            }
          }
        } // end CHIMux / CFIMux
      }
    }
  }

  return BaseImplRetVal;
}

// Return the callee-saved register list for MF, specialized by calling
// convention (GHC saves nothing; AnyReg saves everything) and by the
// presence of swifterror arguments.
const MCPhysReg *
SystemZRegisterInfo::getCalleeSavedRegs(const MachineFunction *MF) const {
  const SystemZSubtarget &Subtarget = MF->getSubtarget<SystemZSubtarget>();
  if (MF->getFunction().getCallingConv() == CallingConv::GHC)
    return CSR_SystemZ_NoRegs_SaveList;
  if (MF->getFunction().getCallingConv() == CallingConv::AnyReg)
    return Subtarget.hasVector()? CSR_SystemZ_AllRegs_Vector_SaveList
                                : CSR_SystemZ_AllRegs_SaveList;
  if (MF->getSubtarget().getTargetLowering()->supportSwiftError() &&
      MF->getFunction().getAttributes().hasAttrSomewhere(
          Attribute::SwiftError))
    return CSR_SystemZ_SwiftError_SaveList;
  return CSR_SystemZ_SaveList;
}

// Return the call-preserved register mask for calls with convention CC.
// Mirrors the save-list selection in getCalleeSavedRegs above.
const uint32_t *
SystemZRegisterInfo::getCallPreservedMask(const MachineFunction &MF,
                                          CallingConv::ID CC) const {
  const SystemZSubtarget &Subtarget = MF.getSubtarget<SystemZSubtarget>();
  if (CC == CallingConv::GHC)
    return CSR_SystemZ_NoRegs_RegMask;
  if (CC == CallingConv::AnyReg)
    return Subtarget.hasVector()? CSR_SystemZ_AllRegs_Vector_RegMask
                                : CSR_SystemZ_AllRegs_RegMask;
  if (MF.getSubtarget().getTargetLowering()->supportSwiftError() &&
      MF.getFunction().getAttributes().hasAttrSomewhere(
          Attribute::SwiftError))
    return CSR_SystemZ_SwiftError_RegMask;
  return CSR_SystemZ_RegMask;
}

// Mark the registers that the allocator must never use: the frame pointer
// (when one is needed), the stack pointer, the access registers holding
// the thread pointer, and the floating-point control register.
BitVector
SystemZRegisterInfo::getReservedRegs(const MachineFunction &MF) const {
  BitVector Reserved(getNumRegs());
  const SystemZFrameLowering *TFI = getFrameLowering(MF);

  if (TFI->hasFP(MF)) {
    // R11D is the frame pointer.  Reserve all aliases.
    Reserved.set(SystemZ::R11D);
    Reserved.set(SystemZ::R11L);
    Reserved.set(SystemZ::R11H);
    Reserved.set(SystemZ::R10Q);
  }

  // R15D is the stack pointer.  Reserve all aliases.
  Reserved.set(SystemZ::R15D);
  Reserved.set(SystemZ::R15L);
  Reserved.set(SystemZ::R15H);
  Reserved.set(SystemZ::R14Q);

  // A0 and A1 hold the thread pointer.
  Reserved.set(SystemZ::A0);
  Reserved.set(SystemZ::A1);

  // FPC is the floating-point control register.
  Reserved.set(SystemZ::FPC);

  return Reserved;
}

// Rewrite the frame-index operand of MI into a base register + immediate
// offset, switching to an equivalent opcode (or materializing an anchor
// address in a scratch register) when the offset does not fit the
// instruction's displacement field.
void
SystemZRegisterInfo::eliminateFrameIndex(MachineBasicBlock::iterator MI,
                                         int SPAdj, unsigned FIOperandNum,
                                         RegScavenger *RS) const {
  assert(SPAdj == 0 && "Outgoing arguments should be part of the frame");

  MachineBasicBlock &MBB = *MI->getParent();
  MachineFunction &MF = *MBB.getParent();
  auto *TII =
      static_cast<const SystemZInstrInfo *>(MF.getSubtarget().getInstrInfo());
  const SystemZFrameLowering *TFI = getFrameLowering(MF);
  DebugLoc DL = MI->getDebugLoc();

  // Decompose the frame index into a base and offset.
  int FrameIndex = MI->getOperand(FIOperandNum).getIndex();
  Register BasePtr;
  int64_t Offset =
      (TFI->getFrameIndexReference(MF, FrameIndex, BasePtr).getFixed() +
       MI->getOperand(FIOperandNum + 1).getImm());

  // Special handling of dbg_value instructions.
  if (MI->isDebugValue()) {
    MI->getOperand(FIOperandNum).ChangeToRegister(BasePtr, /*isDef*/ false);
    MI->getDebugOffset().ChangeToImmediate(Offset);
    return;
  }

  // See if the offset is in range, or if an equivalent instruction that
  // accepts the offset exists.
  unsigned Opcode = MI->getOpcode();
  unsigned OpcodeForOffset = TII->getOpcodeForOffset(Opcode, Offset);
  if (OpcodeForOffset) {
    if (OpcodeForOffset == SystemZ::LE &&
        MF.getSubtarget<SystemZSubtarget>().hasVector()) {
      // If LE is ok for offset, use LDE instead on z13.
      OpcodeForOffset = SystemZ::LDE32;
    }
    MI->getOperand(FIOperandNum).ChangeToRegister(BasePtr, false);
  }
  else {
    // Create an anchor point that is in range.  Start at 0xffff so that
    // can use LLILH to load the immediate.
    // Shrink the in-range low part of the offset one bit at a time until
    // some opcode accepts it; the remainder becomes the anchor offset.
    int64_t OldOffset = Offset;
    int64_t Mask = 0xffff;
    do {
      Offset = OldOffset & Mask;
      OpcodeForOffset = TII->getOpcodeForOffset(Opcode, Offset);
      Mask >>= 1;
      assert(Mask && "One offset must be OK");
    } while (!OpcodeForOffset);

    Register ScratchReg =
      MF.getRegInfo().createVirtualRegister(&SystemZ::ADDR64BitRegClass);
    int64_t HighOffset = OldOffset - Offset;

    if (MI->getDesc().TSFlags & SystemZII::HasIndex
        && MI->getOperand(FIOperandNum + 2).getReg() == 0) {
      // Load the offset into the scratch register and use it as an index.
      // The scratch register then dies here.
      TII->loadImmediate(MBB, MI, ScratchReg, HighOffset);
      MI->getOperand(FIOperandNum).ChangeToRegister(BasePtr, false);
      MI->getOperand(FIOperandNum + 2).ChangeToRegister(ScratchReg,
                                                        false, false, true);
    } else {
      // Load the anchor address into a scratch register.
      unsigned LAOpcode = TII->getOpcodeForOffset(SystemZ::LA, HighOffset);
      if (LAOpcode)
        BuildMI(MBB, MI, DL, TII->get(LAOpcode),ScratchReg)
          .addReg(BasePtr).addImm(HighOffset).addReg(0);
      else {
        // Load the high offset into the scratch register and use it as
        // an index.
        TII->loadImmediate(MBB, MI, ScratchReg, HighOffset);
        BuildMI(MBB, MI, DL, TII->get(SystemZ::LA), ScratchReg)
          .addReg(BasePtr, RegState::Kill).addImm(0).addReg(ScratchReg);
      }

      // Use the scratch register as the base.  It then dies here.
      MI->getOperand(FIOperandNum).ChangeToRegister(ScratchReg,
                                                    false, false, true);
    }
  }
  MI->setDesc(TII->get(OpcodeForOffset));
  MI->getOperand(FIOperandNum + 1).ChangeToImmediate(Offset);
}

// Decide whether the coalescer may merge the two registers of COPY
// instruction MI.  GR128 subreg copies are only coalesced when both live
// ranges are small, local to one basic block, and enough GR128 registers
// remain unclobbered in that region — otherwise regalloc may run out of
// wide registers.
bool SystemZRegisterInfo::shouldCoalesce(MachineInstr *MI,
                                         const TargetRegisterClass *SrcRC,
                                         unsigned SubReg,
                                         const TargetRegisterClass *DstRC,
                                         unsigned DstSubReg,
                                         const TargetRegisterClass *NewRC,
                                         LiveIntervals &LIS) const {
  assert (MI->isCopy() && "Only expecting COPY instructions");

  // Coalesce anything which is not a COPY involving a subreg to/from GR128.
  if (!(NewRC->hasSuperClassEq(&SystemZ::GR128BitRegClass) &&
        (getRegSizeInBits(*SrcRC) <= 64 || getRegSizeInBits(*DstRC) <= 64)))
    return true;

  // Allow coalescing of a GR128 subreg COPY only if the live ranges are small
  // and local to one MBB with not too much interferring registers. Otherwise
  // regalloc may run out of registers.

  unsigned WideOpNo = (getRegSizeInBits(*SrcRC) == 128 ? 1 : 0);
  Register GR128Reg = MI->getOperand(WideOpNo).getReg();
  Register GRNarReg = MI->getOperand((WideOpNo == 1) ? 0 : 1).getReg();
  LiveInterval &IntGR128 = LIS.getInterval(GR128Reg);
  LiveInterval &IntGRNar = LIS.getInterval(GRNarReg);

  // Check that the two virtual registers are local to MBB.
  MachineBasicBlock *MBB = MI->getParent();
  MachineInstr *FirstMI_GR128 =
    LIS.getInstructionFromIndex(IntGR128.beginIndex());
  MachineInstr *FirstMI_GRNar =
    LIS.getInstructionFromIndex(IntGRNar.beginIndex());
  MachineInstr *LastMI_GR128 = LIS.getInstructionFromIndex(IntGR128.endIndex());
  MachineInstr *LastMI_GRNar = LIS.getInstructionFromIndex(IntGRNar.endIndex());
  if ((!FirstMI_GR128 || FirstMI_GR128->getParent() != MBB) ||
      (!FirstMI_GRNar || FirstMI_GRNar->getParent() != MBB) ||
      (!LastMI_GR128 || LastMI_GR128->getParent() != MBB) ||
      (!LastMI_GRNar || LastMI_GRNar->getParent() != MBB))
    return false;

  // The region spanned by the combined live range: from the start of the
  // earlier interval to the end of the later one.
  MachineBasicBlock::iterator MII = nullptr, MEE = nullptr;
  if (WideOpNo == 1) {
    MII = FirstMI_GR128;
    MEE = LastMI_GRNar;
  } else {
    MII = FirstMI_GRNar;
    MEE = LastMI_GR128;
  }

  // Check if coalescing seems safe by finding the set of clobbered physreg
  // pairs in the region.
  BitVector PhysClobbered(getNumRegs());
  MEE++;
  for (; MII != MEE; ++MII) {
    for (const MachineOperand &MO : MII->operands())
      if (MO.isReg() && Register::isPhysicalRegister(MO.getReg())) {
        for (MCSuperRegIterator SI(MO.getReg(), this, true/*IncludeSelf*/);
             SI.isValid(); ++SI)
          if (NewRC->contains(*SI)) {
            PhysClobbered.set(*SI);
            break;
          }
      }
  }

  // Demand an arbitrary margin of free regs.
  unsigned const DemandedFreeGR128 = 3;
  if (PhysClobbered.count() > (NewRC->getNumRegs() - DemandedFreeGR128))
    return false;

  return true;
}

// Return the register used as the base for frame accesses: the frame
// pointer R11D when one has been established, otherwise the stack
// pointer R15D.
Register
SystemZRegisterInfo::getFrameRegister(const MachineFunction &MF) const {
  const SystemZFrameLowering *TFI = getFrameLowering(MF);
  return TFI->hasFP(MF) ? SystemZ::R11D : SystemZ::R15D;
}

// Copies to/from the condition-code register are routed through GR32.
const TargetRegisterClass *
SystemZRegisterInfo::getCrossCopyRegClass(const TargetRegisterClass *RC) const {
  if (RC == &SystemZ::CCRRegClass)
    return &SystemZ::GR32BitRegClass;
  return RC;
}