1 //===-- SystemZRegisterInfo.cpp - SystemZ register information ------------===// 2 // 3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. 4 // See https://llvm.org/LICENSE.txt for license information. 5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception 6 // 7 //===----------------------------------------------------------------------===// 8 9 #include "SystemZRegisterInfo.h" 10 #include "SystemZInstrInfo.h" 11 #include "SystemZSubtarget.h" 12 #include "llvm/ADT/SmallSet.h" 13 #include "llvm/CodeGen/LiveIntervals.h" 14 #include "llvm/CodeGen/MachineInstrBuilder.h" 15 #include "llvm/CodeGen/MachineRegisterInfo.h" 16 #include "llvm/CodeGen/TargetFrameLowering.h" 17 #include "llvm/CodeGen/VirtRegMap.h" 18 #include "llvm/IR/DebugInfoMetadata.h" 19 20 using namespace llvm; 21 22 #define GET_REGINFO_TARGET_DESC 23 #include "SystemZGenRegisterInfo.inc" 24 25 // Given that MO is a GRX32 operand, return either GR32 or GRH32 if MO 26 // somehow belongs in it. Otherwise, return GRX32. 
27 static const TargetRegisterClass *getRC32(MachineOperand &MO, 28 const VirtRegMap *VRM, 29 const MachineRegisterInfo *MRI) { 30 const TargetRegisterClass *RC = MRI->getRegClass(MO.getReg()); 31 32 if (SystemZ::GR32BitRegClass.hasSubClassEq(RC) || 33 MO.getSubReg() == SystemZ::subreg_l32 || 34 MO.getSubReg() == SystemZ::subreg_hl32) 35 return &SystemZ::GR32BitRegClass; 36 if (SystemZ::GRH32BitRegClass.hasSubClassEq(RC) || 37 MO.getSubReg() == SystemZ::subreg_h32 || 38 MO.getSubReg() == SystemZ::subreg_hh32) 39 return &SystemZ::GRH32BitRegClass; 40 41 if (VRM && VRM->hasPhys(MO.getReg())) { 42 Register PhysReg = VRM->getPhys(MO.getReg()); 43 if (SystemZ::GR32BitRegClass.contains(PhysReg)) 44 return &SystemZ::GR32BitRegClass; 45 assert (SystemZ::GRH32BitRegClass.contains(PhysReg) && 46 "Phys reg not in GR32 or GRH32?"); 47 return &SystemZ::GRH32BitRegClass; 48 } 49 50 assert (RC == &SystemZ::GRX32BitRegClass); 51 return RC; 52 } 53 54 // Pass the registers of RC as hints while making sure that if any of these 55 // registers are copy hints (and therefore already in Hints), hint them 56 // first. 
static void addHints(ArrayRef<MCPhysReg> Order,
                     SmallVectorImpl<MCPhysReg> &Hints,
                     const TargetRegisterClass *RC,
                     const MachineRegisterInfo *MRI) {
  // Remember which registers are already hinted (the copy hints) so they can
  // be re-emitted ahead of the rest of RC.
  SmallSet<unsigned, 4> CopyHints;
  CopyHints.insert(Hints.begin(), Hints.end());
  Hints.clear();
  // First pass: copy hints that are usable members of RC.
  for (MCPhysReg Reg : Order)
    if (CopyHints.count(Reg) &&
        RC->contains(Reg) && !MRI->isReserved(Reg))
      Hints.push_back(Reg);
  // Second pass: the remaining usable members of RC.
  for (MCPhysReg Reg : Order)
    if (!CopyHints.count(Reg) &&
        RC->contains(Reg) && !MRI->isReserved(Reg))
      Hints.push_back(Reg);
}

// Provide SystemZ-specific allocation hints for VirtReg on top of the
// target-independent copy hints: physregs tied via two-operand (memory)
// forms, and GR32/GRH32 class constraints implied by LOCRMux/SELRMux and
// CHIMux/CFIMux users of a GRX32 virtual register.
bool SystemZRegisterInfo::getRegAllocationHints(
    Register VirtReg, ArrayRef<MCPhysReg> Order,
    SmallVectorImpl<MCPhysReg> &Hints, const MachineFunction &MF,
    const VirtRegMap *VRM, const LiveRegMatrix *Matrix) const {
  const MachineRegisterInfo *MRI = &MF.getRegInfo();
  const SystemZSubtarget &Subtarget = MF.getSubtarget<SystemZSubtarget>();
  const TargetRegisterInfo *TRI = Subtarget.getRegisterInfo();

  bool BaseImplRetVal = TargetRegisterInfo::getRegAllocationHints(
      VirtReg, Order, Hints, MF, VRM, Matrix);

  if (VRM != nullptr) {
    // Add any two address hints after any copy hints.
    SmallSet<unsigned, 4> TwoAddrHints;
    for (auto &Use : MRI->reg_nodbg_instructions(VirtReg))
      if (SystemZ::getTwoOperandOpcode(Use.getOpcode()) != -1) {
        // Work out which operand is VirtReg (VRRegMO), which operand it
        // could be tied to (OtherMO), and — for commutable instructions —
        // the alternative operand it could be tied to instead (CommuMO).
        const MachineOperand *VRRegMO = nullptr;
        const MachineOperand *OtherMO = nullptr;
        const MachineOperand *CommuMO = nullptr;
        if (VirtReg == Use.getOperand(0).getReg()) {
          VRRegMO = &Use.getOperand(0);
          OtherMO = &Use.getOperand(1);
          if (Use.isCommutable())
            CommuMO = &Use.getOperand(2);
        } else if (VirtReg == Use.getOperand(1).getReg()) {
          VRRegMO = &Use.getOperand(1);
          OtherMO = &Use.getOperand(0);
        } else if (VirtReg == Use.getOperand(2).getReg() &&
                   Use.isCommutable()) {
          VRRegMO = &Use.getOperand(2);
          OtherMO = &Use.getOperand(0);
        } else
          continue;

        // Record MO's physreg (direct, or its VRM assignment) as a hint,
        // adjusted for subregister indices on either operand.
        auto tryAddHint = [&](const MachineOperand *MO) -> void {
          Register Reg = MO->getReg();
          Register PhysReg = Register::isPhysicalRegister(Reg)
                                 ? Reg
                                 : Register(VRM->getPhys(Reg));
          if (PhysReg) {
            if (MO->getSubReg())
              PhysReg = getSubReg(PhysReg, MO->getSubReg());
            if (VRRegMO->getSubReg())
              PhysReg = getMatchingSuperReg(PhysReg, VRRegMO->getSubReg(),
                                            MRI->getRegClass(VirtReg));
            // Skip registers already present as copy hints so they are not
            // duplicated below.
            if (!MRI->isReserved(PhysReg) && !is_contained(Hints, PhysReg))
              TwoAddrHints.insert(PhysReg);
          }
        };
        tryAddHint(OtherMO);
        if (CommuMO)
          tryAddHint(CommuMO);
      }
    // Append the collected two-address hints in allocation order, after the
    // copy hints produced by the base implementation.
    for (MCPhysReg OrderReg : Order)
      if (TwoAddrHints.count(OrderReg))
        Hints.push_back(OrderReg);
  }

  if (MRI->getRegClass(VirtReg) == &SystemZ::GRX32BitRegClass) {
    // Walk VirtReg and any GRX32 registers connected to it through
    // LOCRMux/SELRMux operands, looking for a constraint to GR32 or GRH32.
    SmallVector<Register, 8> Worklist;
    SmallSet<Register, 4> DoneRegs;
    Worklist.push_back(VirtReg);
    while (Worklist.size()) {
      Register Reg = Worklist.pop_back_val();
      // Avoid revisiting a register already processed.
      if (!DoneRegs.insert(Reg).second)
        continue;

      for (auto &Use : MRI->reg_instructions(Reg)) {
        // For LOCRMux, see if the other operand is already a high or low
        // register, and in that case give the corresponding hints for
        // VirtReg. LOCR instructions need both operands in either high or
        // low parts. Same handling for SELRMux.
        if (Use.getOpcode() == SystemZ::LOCRMux ||
            Use.getOpcode() == SystemZ::SELRMux) {
          MachineOperand &TrueMO = Use.getOperand(1);
          MachineOperand &FalseMO = Use.getOperand(2);
          const TargetRegisterClass *RC =
              TRI->getCommonSubClass(getRC32(FalseMO, VRM, MRI),
                                     getRC32(TrueMO, VRM, MRI));
          // SELRMux additionally constrains the destination operand.
          if (Use.getOpcode() == SystemZ::SELRMux)
            RC = TRI->getCommonSubClass(RC,
                                        getRC32(Use.getOperand(0), VRM, MRI));
          if (RC && RC != &SystemZ::GRX32BitRegClass) {
            addHints(Order, Hints, RC, MRI);
            // Return true to make these hints the only regs available to
            // RA. This may mean extra spilling but since the alternative is
            // a jump sequence expansion of the LOCRMux, it is preferred.
            return true;
          }

          // Add the other operand of the LOCRMux to the worklist.
          Register OtherReg =
              (TrueMO.getReg() == Reg ? FalseMO.getReg() : TrueMO.getReg());
          if (MRI->getRegClass(OtherReg) == &SystemZ::GRX32BitRegClass)
            Worklist.push_back(OtherReg);
        } // end LOCRMux
        else if (Use.getOpcode() == SystemZ::CHIMux ||
                 Use.getOpcode() == SystemZ::CFIMux) {
          if (Use.getOperand(1).getImm() == 0) {
            // Hint the low part (GR32) only when every definition of
            // VirtReg is an LMux.
            bool OnlyLMuxes = true;
            for (MachineInstr &DefMI : MRI->def_instructions(VirtReg))
              if (DefMI.getOpcode() != SystemZ::LMux)
                OnlyLMuxes = false;
            if (OnlyLMuxes) {
              addHints(Order, Hints, &SystemZ::GR32BitRegClass, MRI);
              // Return false to make these hints preferred but not obligatory.
              return false;
            }
          }
        } // end CHIMux / CFIMux
      }
    }
  }

  return BaseImplRetVal;
}

// Return the XPLINK64 callee-saved register list, selecting the vector
// variant when the subtarget has vector support.
const MCPhysReg *
SystemZXPLINK64Registers::getCalleeSavedRegs(const MachineFunction *MF) const {
  const SystemZSubtarget &Subtarget = MF->getSubtarget<SystemZSubtarget>();
  return Subtarget.hasVector() ? CSR_SystemZ_XPLINK64_Vector_SaveList
                               : CSR_SystemZ_XPLINK64_SaveList;
}

// Return the ELF callee-saved register list, with special cases for the
// GHC and AnyReg calling conventions and for functions using swifterror.
const MCPhysReg *
SystemZELFRegisters::getCalleeSavedRegs(const MachineFunction *MF) const {
  const SystemZSubtarget &Subtarget = MF->getSubtarget<SystemZSubtarget>();
  if (MF->getFunction().getCallingConv() == CallingConv::GHC)
    return CSR_SystemZ_NoRegs_SaveList;
  if (MF->getFunction().getCallingConv() == CallingConv::AnyReg)
    return Subtarget.hasVector()? CSR_SystemZ_AllRegs_Vector_SaveList
                                : CSR_SystemZ_AllRegs_SaveList;
  if (MF->getSubtarget().getTargetLowering()->supportSwiftError() &&
      MF->getFunction().getAttributes().hasAttrSomewhere(
          Attribute::SwiftError))
    return CSR_SystemZ_SwiftError_SaveList;
  return CSR_SystemZ_ELF_SaveList;
}

// Return the XPLINK64 call-preserved register mask, selecting the vector
// variant when the subtarget has vector support.
const uint32_t *
SystemZXPLINK64Registers::getCallPreservedMask(const MachineFunction &MF,
                                               CallingConv::ID CC) const {
  const SystemZSubtarget &Subtarget = MF.getSubtarget<SystemZSubtarget>();
  return Subtarget.hasVector() ? CSR_SystemZ_XPLINK64_Vector_RegMask
                               : CSR_SystemZ_XPLINK64_RegMask;
}

// Return the ELF call-preserved register mask, mirroring the special cases
// in SystemZELFRegisters::getCalleeSavedRegs above.
const uint32_t *
SystemZELFRegisters::getCallPreservedMask(const MachineFunction &MF,
                                          CallingConv::ID CC) const {
  const SystemZSubtarget &Subtarget = MF.getSubtarget<SystemZSubtarget>();
  if (CC == CallingConv::GHC)
    return CSR_SystemZ_NoRegs_RegMask;
  if (CC == CallingConv::AnyReg)
    return Subtarget.hasVector()? CSR_SystemZ_AllRegs_Vector_RegMask
                                : CSR_SystemZ_AllRegs_RegMask;
  if (MF.getSubtarget().getTargetLowering()->supportSwiftError() &&
      MF.getFunction().getAttributes().hasAttrSomewhere(
          Attribute::SwiftError))
    return CSR_SystemZ_SwiftError_RegMask;
  return CSR_SystemZ_ELF_RegMask;
}

SystemZRegisterInfo::SystemZRegisterInfo(unsigned int RA)
    : SystemZGenRegisterInfo(RA) {}

// Dispatch to the ABI-specific (XPLINK64 or ELF) callee-saved list.
const MCPhysReg *
SystemZRegisterInfo::getCalleeSavedRegs(const MachineFunction *MF) const {

  const SystemZSubtarget *Subtarget = &MF->getSubtarget<SystemZSubtarget>();
  SystemZCallingConventionRegisters *Regs = Subtarget->getSpecialRegisters();

  return Regs->getCalleeSavedRegs(MF);
}

// Dispatch to the ABI-specific (XPLINK64 or ELF) call-preserved mask.
const uint32_t *
SystemZRegisterInfo::getCallPreservedMask(const MachineFunction &MF,
                                          CallingConv::ID CC) const {

  const SystemZSubtarget *Subtarget = &MF.getSubtarget<SystemZSubtarget>();
  SystemZCallingConventionRegisters *Regs = Subtarget->getSpecialRegisters();
  return Regs->getCallPreservedMask(MF, CC);
}

// Mark the registers the allocator must never use: the stack pointer (and
// frame pointer, when one is needed), the access registers holding the
// thread pointer, and the floating-point control register.
BitVector
SystemZRegisterInfo::getReservedRegs(const MachineFunction &MF) const {
  BitVector Reserved(getNumRegs());
  const SystemZFrameLowering *TFI = getFrameLowering(MF);
  const SystemZSubtarget *Subtarget = &MF.getSubtarget<SystemZSubtarget>();
  SystemZCallingConventionRegisters *Regs = Subtarget->getSpecialRegisters();
  if (TFI->hasFP(MF))
    // The frame pointer. Reserve all aliases.
    for (MCRegAliasIterator AI(Regs->getFramePointerRegister(), this, true);
         AI.isValid(); ++AI)
      Reserved.set(*AI);

  // Reserve all aliases for the stack pointer.
  for (MCRegAliasIterator AI(Regs->getStackPointerRegister(), this, true);
       AI.isValid(); ++AI)
    Reserved.set(*AI);

  // A0 and A1 hold the thread pointer.
  Reserved.set(SystemZ::A0);
  Reserved.set(SystemZ::A1);

  // FPC is the floating-point control register.
  Reserved.set(SystemZ::FPC);

  return Reserved;
}

// Rewrite the frame-index operand of MI into a base register plus
// displacement, materializing an anchor address in a scratch register when
// the offset does not fit the instruction's displacement field.
void
SystemZRegisterInfo::eliminateFrameIndex(MachineBasicBlock::iterator MI,
                                         int SPAdj, unsigned FIOperandNum,
                                         RegScavenger *RS) const {
  assert(SPAdj == 0 && "Outgoing arguments should be part of the frame");

  MachineBasicBlock &MBB = *MI->getParent();
  MachineFunction &MF = *MBB.getParent();
  auto *TII = MF.getSubtarget<SystemZSubtarget>().getInstrInfo();
  const SystemZFrameLowering *TFI = getFrameLowering(MF);
  DebugLoc DL = MI->getDebugLoc();

  // Decompose the frame index into a base and offset.
  int FrameIndex = MI->getOperand(FIOperandNum).getIndex();
  Register BasePtr;
  int64_t Offset =
      (TFI->getFrameIndexReference(MF, FrameIndex, BasePtr).getFixed() +
       MI->getOperand(FIOperandNum + 1).getImm());

  // Special handling of dbg_value instructions.
  if (MI->isDebugValue()) {
    MI->getOperand(FIOperandNum).ChangeToRegister(BasePtr, /*isDef*/ false);
    if (MI->isNonListDebugValue()) {
      MI->getDebugOffset().ChangeToImmediate(Offset);
    } else {
      // For DBG_VALUE_LIST, fold the offset into the DIExpression of the
      // affected debug operand instead.
      unsigned OpIdx = MI->getDebugOperandIndex(&MI->getOperand(FIOperandNum));
      SmallVector<uint64_t, 3> Ops;
      DIExpression::appendOffset(
          Ops, TFI->getFrameIndexReference(MF, FrameIndex, BasePtr).getFixed());
      MI->getDebugExpressionOp().setMetadata(
          DIExpression::appendOpsToArg(MI->getDebugExpression(), Ops, OpIdx));
    }
    return;
  }

  // See if the offset is in range, or if an equivalent instruction that
  // accepts the offset exists.
  unsigned Opcode = MI->getOpcode();
  unsigned OpcodeForOffset = TII->getOpcodeForOffset(Opcode, Offset, &*MI);
  if (OpcodeForOffset) {
    if (OpcodeForOffset == SystemZ::LE &&
        MF.getSubtarget<SystemZSubtarget>().hasVector()) {
      // If LE is ok for offset, use LDE32 instead on z13.
      OpcodeForOffset = SystemZ::LDE32;
    }
    MI->getOperand(FIOperandNum).ChangeToRegister(BasePtr, false);
  }
  else {
    // Create an anchor point that is in range. Start at 0xffff so that
    // can use LLILH to load the immediate.
    int64_t OldOffset = Offset;
    int64_t Mask = 0xffff;
    do {
      Offset = OldOffset & Mask;
      OpcodeForOffset = TII->getOpcodeForOffset(Opcode, Offset);
      Mask >>= 1;
      assert(Mask && "One offset must be OK");
    } while (!OpcodeForOffset);

    Register ScratchReg =
        MF.getRegInfo().createVirtualRegister(&SystemZ::ADDR64BitRegClass);
    int64_t HighOffset = OldOffset - Offset;

    if (MI->getDesc().TSFlags & SystemZII::HasIndex
        && MI->getOperand(FIOperandNum + 2).getReg() == 0) {
      // Load the offset into the scratch register and use it as an index.
      // The scratch register then dies here.
      TII->loadImmediate(MBB, MI, ScratchReg, HighOffset);
      MI->getOperand(FIOperandNum).ChangeToRegister(BasePtr, false);
      MI->getOperand(FIOperandNum + 2).ChangeToRegister(ScratchReg,
                                                        false, false, true);
    } else {
      // Load the anchor address into a scratch register.
      unsigned LAOpcode = TII->getOpcodeForOffset(SystemZ::LA, HighOffset);
      if (LAOpcode)
        BuildMI(MBB, MI, DL, TII->get(LAOpcode),ScratchReg)
          .addReg(BasePtr).addImm(HighOffset).addReg(0);
      else {
        // Load the high offset into the scratch register and use it as
        // an index.
        TII->loadImmediate(MBB, MI, ScratchReg, HighOffset);
        BuildMI(MBB, MI, DL, TII->get(SystemZ::LA), ScratchReg)
          .addReg(BasePtr, RegState::Kill).addImm(0).addReg(ScratchReg);
      }

      // Use the scratch register as the base. It then dies here.
      MI->getOperand(FIOperandNum).ChangeToRegister(ScratchReg,
                                                    false, false, true);
    }
  }
  // Finally switch to the (possibly adjusted) opcode and in-range offset.
  MI->setDesc(TII->get(OpcodeForOffset));
  MI->getOperand(FIOperandNum + 1).ChangeToImmediate(Offset);
}

// Decide whether the register coalescer may merge the two operands of the
// COPY instruction MI. GR128 subreg copies are only coalesced when both
// live ranges are local to one basic block and enough GR128 registers
// appear to remain free in that region.
bool SystemZRegisterInfo::shouldCoalesce(MachineInstr *MI,
                                         const TargetRegisterClass *SrcRC,
                                         unsigned SubReg,
                                         const TargetRegisterClass *DstRC,
                                         unsigned DstSubReg,
                                         const TargetRegisterClass *NewRC,
                                         LiveIntervals &LIS) const {
  assert (MI->isCopy() && "Only expecting COPY instructions");

  // Coalesce anything which is not a COPY involving a subreg to/from GR128.
  if (!(NewRC->hasSuperClassEq(&SystemZ::GR128BitRegClass) &&
        (getRegSizeInBits(*SrcRC) <= 64 || getRegSizeInBits(*DstRC) <= 64)))
    return true;

  // Allow coalescing of a GR128 subreg COPY only if the live ranges are small
  // and local to one MBB with not too much interferring registers. Otherwise
  // regalloc may run out of registers.

  unsigned WideOpNo = (getRegSizeInBits(*SrcRC) == 128 ? 1 : 0);
  Register GR128Reg = MI->getOperand(WideOpNo).getReg();
  Register GRNarReg = MI->getOperand((WideOpNo == 1) ? 0 : 1).getReg();
  LiveInterval &IntGR128 = LIS.getInterval(GR128Reg);
  LiveInterval &IntGRNar = LIS.getInterval(GRNarReg);

  // Check that the two virtual registers are local to MBB.
  MachineBasicBlock *MBB = MI->getParent();
  MachineInstr *FirstMI_GR128 =
    LIS.getInstructionFromIndex(IntGR128.beginIndex());
  MachineInstr *FirstMI_GRNar =
    LIS.getInstructionFromIndex(IntGRNar.beginIndex());
  MachineInstr *LastMI_GR128 = LIS.getInstructionFromIndex(IntGR128.endIndex());
  MachineInstr *LastMI_GRNar = LIS.getInstructionFromIndex(IntGRNar.endIndex());
  if ((!FirstMI_GR128 || FirstMI_GR128->getParent() != MBB) ||
      (!FirstMI_GRNar || FirstMI_GRNar->getParent() != MBB) ||
      (!LastMI_GR128 || LastMI_GR128->getParent() != MBB) ||
      (!LastMI_GRNar || LastMI_GRNar->getParent() != MBB))
    return false;

  // The scan region runs from the start of the earlier interval to the end
  // of the later one, depending on which operand is the wide register.
  MachineBasicBlock::iterator MII = nullptr, MEE = nullptr;
  if (WideOpNo == 1) {
    MII = FirstMI_GR128;
    MEE = LastMI_GRNar;
  } else {
    MII = FirstMI_GRNar;
    MEE = LastMI_GR128;
  }

  // Check if coalescing seems safe by finding the set of clobbered physreg
  // pairs in the region.
  BitVector PhysClobbered(getNumRegs());
  MEE++;
  for (; MII != MEE; ++MII) {
    for (const MachineOperand &MO : MII->operands())
      if (MO.isReg() && Register::isPhysicalRegister(MO.getReg())) {
        for (MCSuperRegIterator SI(MO.getReg(), this, true/*IncludeSelf*/);
             SI.isValid(); ++SI)
          if (NewRC->contains(*SI)) {
            PhysClobbered.set(*SI);
            break;
          }
      }
  }

  // Demand an arbitrary margin of free regs.
  unsigned const DemandedFreeGR128 = 3;
  if (PhysClobbered.count() > (NewRC->getNumRegs() - DemandedFreeGR128))
    return false;

  return true;
}

// Return the register that frame accesses are based on: the frame pointer
// if one is in use, otherwise the stack pointer.
Register
SystemZRegisterInfo::getFrameRegister(const MachineFunction &MF) const {
  const SystemZFrameLowering *TFI = getFrameLowering(MF);
  const SystemZSubtarget *Subtarget = &MF.getSubtarget<SystemZSubtarget>();
  SystemZCallingConventionRegisters *Regs = Subtarget->getSpecialRegisters();

  return TFI->hasFP(MF) ? Regs->getFramePointerRegister()
                        : Regs->getStackPointerRegister();
}

// Cross-class copies of CCR values are done through GR32.
const TargetRegisterClass *
SystemZRegisterInfo::getCrossCopyRegClass(const TargetRegisterClass *RC) const {
  if (RC == &SystemZ::CCRRegClass)
    return &SystemZ::GR32BitRegClass;
  return RC;
}