//===-- X86RegisterInfo.cpp - X86 Register Information --------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file contains the X86 implementation of the TargetRegisterInfo class.
// This file is responsible for the frame pointer elimination optimization
// on X86.
//
//===----------------------------------------------------------------------===//

#include "X86RegisterInfo.h"
#include "X86FrameLowering.h"
#include "X86MachineFunctionInfo.h"
#include "X86Subtarget.h"
#include "llvm/ADT/BitVector.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/CodeGen/LiveRegMatrix.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/TargetFrameLowering.h"
#include "llvm/CodeGen/TargetInstrInfo.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/Type.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetOptions.h"

using namespace llvm;

#define GET_REGINFO_TARGET_DESC
#include "X86GenRegisterInfo.inc"

static cl::opt<bool>
EnableBasePointer("x86-use-base-pointer", cl::Hidden, cl::init(true),
          cl::desc("Enable use of a base pointer for complex stack frames"));

X86RegisterInfo::X86RegisterInfo(const Triple &TT)
    : X86GenRegisterInfo((TT.isArch64Bit() ? X86::RIP : X86::EIP),
                         X86_MC::getDwarfRegFlavour(TT, false),
                         X86_MC::getDwarfRegFlavour(TT, true),
                         (TT.isArch64Bit() ? X86::RIP : X86::EIP)) {
  X86_MC::initLLVMToSEHAndCVRegMapping(this);

  // Cache some information.
  Is64Bit = TT.isArch64Bit();
  IsWin64 = Is64Bit && TT.isOSWindows();

  // Use a callee-saved register as the base pointer. These registers must
  // not conflict with any ABI requirements. For example, in 32-bit mode PIC
  // requires the GOT address to be in EBX before making function calls via
  // the PLT.
  if (Is64Bit) {
    SlotSize = 8;
    // This matches the simplified 32-bit pointer code in the data layout
    // computation.
    // FIXME: Should use the data layout?
    bool Use64BitReg = TT.getEnvironment() != Triple::GNUX32;
    StackPtr = Use64BitReg ? X86::RSP : X86::ESP;
    FramePtr = Use64BitReg ? X86::RBP : X86::EBP;
    BasePtr = Use64BitReg ? X86::RBX : X86::EBX;
  } else {
    SlotSize = 4;
    StackPtr = X86::ESP;
    FramePtr = X86::EBP;
    BasePtr = X86::ESI;
  }
}

int
X86RegisterInfo::getSEHRegNum(unsigned i) const {
  return getEncodingValue(i);
}

const TargetRegisterClass *
X86RegisterInfo::getSubClassWithSubReg(const TargetRegisterClass *RC,
                                       unsigned Idx) const {
  // The sub_8bit sub-register index is more constrained in 32-bit mode.
  // It behaves just like the sub_8bit_hi index.
  if (!Is64Bit && Idx == X86::sub_8bit)
    Idx = X86::sub_8bit_hi;

  // Forward to TableGen's default version.
  return X86GenRegisterInfo::getSubClassWithSubReg(RC, Idx);
}
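
// Illustrative note, not exercised by this file: in 32-bit mode only EAX,
// EBX, ECX and EDX have addressable 8-bit sub-registers, so a query such as
//
//   TRI->getSubClassWithSubReg(&X86::GR32RegClass, X86::sub_8bit)
//
// is answered as if sub_8bit_hi had been requested and is expected to yield
// the GR32_ABCD sub-class rather than the full GR32 class.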

const TargetRegisterClass *
X86RegisterInfo::getMatchingSuperRegClass(const TargetRegisterClass *A,
                                          const TargetRegisterClass *B,
                                          unsigned SubIdx) const {
  // The sub_8bit sub-register index is more constrained in 32-bit mode.
  if (!Is64Bit && SubIdx == X86::sub_8bit) {
    A = X86GenRegisterInfo::getSubClassWithSubReg(A, X86::sub_8bit_hi);
    if (!A)
      return nullptr;
  }
  return X86GenRegisterInfo::getMatchingSuperRegClass(A, B, SubIdx);
}

const TargetRegisterClass *
X86RegisterInfo::getLargestLegalSuperClass(const TargetRegisterClass *RC,
                                           const MachineFunction &MF) const {
  // Don't allow super-classes of GR8_NOREX. This class is only used after
  // extracting sub_8bit_hi sub-registers. The H sub-registers cannot be copied
  // to the full GR8 register class in 64-bit mode, so we cannot allow the
  // register class inflation.
  //
  // The GR8_NOREX class is always used in a way that won't be constrained to a
  // sub-class, so sub-classes like GR8_ABCD_L are allowed to expand to the
  // full GR8 class.
  if (RC == &X86::GR8_NOREXRegClass)
    return RC;

  const X86Subtarget &Subtarget = MF.getSubtarget<X86Subtarget>();

  const TargetRegisterClass *Super = RC;
  TargetRegisterClass::sc_iterator I = RC->getSuperClasses();
  do {
    switch (Super->getID()) {
    case X86::FR32RegClassID:
    case X86::FR64RegClassID:
      // If AVX-512 isn't supported we should only inflate to these classes.
      if (!Subtarget.hasAVX512() &&
          getRegSizeInBits(*Super) == getRegSizeInBits(*RC))
        return Super;
      break;
    case X86::VR128RegClassID:
    case X86::VR256RegClassID:
      // If VLX isn't supported we should only inflate to these classes.
      if (!Subtarget.hasVLX() &&
          getRegSizeInBits(*Super) == getRegSizeInBits(*RC))
        return Super;
      break;
    case X86::VR128XRegClassID:
    case X86::VR256XRegClassID:
      // If VLX isn't supported we shouldn't inflate to these classes.
      if (Subtarget.hasVLX() &&
          getRegSizeInBits(*Super) == getRegSizeInBits(*RC))
        return Super;
      break;
    case X86::FR32XRegClassID:
    case X86::FR64XRegClassID:
      // If AVX-512 isn't supported we shouldn't inflate to these classes.
      if (Subtarget.hasAVX512() &&
          getRegSizeInBits(*Super) == getRegSizeInBits(*RC))
        return Super;
      break;
    case X86::GR8RegClassID:
    case X86::GR16RegClassID:
    case X86::GR32RegClassID:
    case X86::GR64RegClassID:
    case X86::RFP32RegClassID:
    case X86::RFP64RegClassID:
    case X86::RFP80RegClassID:
    case X86::VR512_0_15RegClassID:
    case X86::VR512RegClassID:
      // Don't return a super-class that would shrink the spill size.
      // That can happen with the vector and float classes.
      if (getRegSizeInBits(*Super) == getRegSizeInBits(*RC))
        return Super;
    }
    Super = *I++;
  } while (Super);
  return RC;
}
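
// For illustration (behavior sketched from the switch above, not asserted
// anywhere in this file): on an AVX-512 target a value constrained to FR32
// may be inflated to FR32X, giving the allocator access to XMM16-XMM31,
// while on a pre-AVX-512 target the walk stops at FR32 itself. Both classes
// hold 32-bit values, so the spill size is unchanged either way.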

const TargetRegisterClass *
X86RegisterInfo::getPointerRegClass(const MachineFunction &MF,
                                    unsigned Kind) const {
  const X86Subtarget &Subtarget = MF.getSubtarget<X86Subtarget>();
  switch (Kind) {
  default: llvm_unreachable("Unexpected Kind in getPointerRegClass!");
  case 0: // Normal GPRs.
    if (Subtarget.isTarget64BitLP64())
      return &X86::GR64RegClass;
    // If the target is 64-bit but we have been told to use 32-bit addresses,
    // we can still use a 64-bit register as long as we know the high bits
    // are zeros.
    // Reflect that in the returned register class.
    if (Is64Bit) {
      // When the target also allows a 64-bit frame pointer and we do have a
      // frame, it is fine to use it for address accesses as well.
      const X86FrameLowering *TFI = getFrameLowering(MF);
      return TFI->hasFP(MF) && TFI->Uses64BitFramePtr
                 ? &X86::LOW32_ADDR_ACCESS_RBPRegClass
                 : &X86::LOW32_ADDR_ACCESSRegClass;
    }
    return &X86::GR32RegClass;
  case 1: // Normal GPRs except the stack pointer (for encoding reasons).
    if (Subtarget.isTarget64BitLP64())
      return &X86::GR64_NOSPRegClass;
    // NOSP does not contain RIP, so no special case here.
    return &X86::GR32_NOSPRegClass;
  case 2: // NOREX GPRs.
    if (Subtarget.isTarget64BitLP64())
      return &X86::GR64_NOREXRegClass;
    return &X86::GR32_NOREXRegClass;
  case 3: // NOREX GPRs except the stack pointer (for encoding reasons).
    if (Subtarget.isTarget64BitLP64())
      return &X86::GR64_NOREX_NOSPRegClass;
    // NOSP does not contain RIP, so no special case here.
    return &X86::GR32_NOREX_NOSPRegClass;
  case 4: // Available for tailcall (not callee-saved GPRs).
    return getGPRsForTailCall(MF);
  }
}

bool X86RegisterInfo::shouldRewriteCopySrc(const TargetRegisterClass *DefRC,
                                           unsigned DefSubReg,
                                           const TargetRegisterClass *SrcRC,
                                           unsigned SrcSubReg) const {
  // Prevent rewriting a copy where the destination size is larger than the
  // input size. See PR41619.
  // FIXME: Should this be factored into the base implementation somehow?
  if (DefRC->hasSuperClassEq(&X86::GR64RegClass) && DefSubReg == 0 &&
      SrcRC->hasSuperClassEq(&X86::GR64RegClass) && SrcSubReg == X86::sub_32bit)
    return false;

  return TargetRegisterInfo::shouldRewriteCopySrc(DefRC, DefSubReg,
                                                  SrcRC, SrcSubReg);
}

const TargetRegisterClass *
X86RegisterInfo::getGPRsForTailCall(const MachineFunction &MF) const {
  const Function &F = MF.getFunction();
  if (IsWin64 || (F.getCallingConv() == CallingConv::Win64))
    return &X86::GR64_TCW64RegClass;
  else if (Is64Bit)
    return &X86::GR64_TCRegClass;

  bool hasHipeCC = (F.getCallingConv() == CallingConv::HiPE);
  if (hasHipeCC)
    return &X86::GR32RegClass;
  return &X86::GR32_TCRegClass;
}

const TargetRegisterClass *
X86RegisterInfo::getCrossCopyRegClass(const TargetRegisterClass *RC) const {
  if (RC == &X86::CCRRegClass) {
    if (Is64Bit)
      return &X86::GR64RegClass;
    else
      return &X86::GR32RegClass;
  }
  return RC;
}
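
// Background for the CCR case above (a sketch, not a contract of this file):
// EFLAGS cannot be copied directly between register banks; copies are
// materialized through a general-purpose register (e.g. PUSHF/POP-style
// sequences), which is why cross-copies of CCR are answered with GR64/GR32.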

unsigned
X86RegisterInfo::getRegPressureLimit(const TargetRegisterClass *RC,
                                     MachineFunction &MF) const {
  const X86FrameLowering *TFI = getFrameLowering(MF);

  unsigned FPDiff = TFI->hasFP(MF) ? 1 : 0;
  switch (RC->getID()) {
  default:
    return 0;
  case X86::GR32RegClassID:
    return 4 - FPDiff;
  case X86::GR64RegClassID:
    return 12 - FPDiff;
  case X86::VR128RegClassID:
    return Is64Bit ? 10 : 4;
  case X86::VR64RegClassID:
    return 4;
  }
}

const MCPhysReg *
X86RegisterInfo::getCalleeSavedRegs(const MachineFunction *MF) const {
  assert(MF && "MachineFunction required");

  const X86Subtarget &Subtarget = MF->getSubtarget<X86Subtarget>();
  const Function &F = MF->getFunction();
  bool HasSSE = Subtarget.hasSSE1();
  bool HasAVX = Subtarget.hasAVX();
  bool HasAVX512 = Subtarget.hasAVX512();
  bool CallsEHReturn = MF->callsEHReturn();

  CallingConv::ID CC = F.getCallingConv();

  // If the function has the "no_caller_saved_registers" attribute, switch to
  // the X86_INTR calling convention, which has the matching CSR list.
  if (MF->getFunction().hasFnAttribute("no_caller_saved_registers"))
    CC = CallingConv::X86_INTR;

  switch (CC) {
  case CallingConv::GHC:
  case CallingConv::HiPE:
    return CSR_NoRegs_SaveList;
  case CallingConv::AnyReg:
    if (HasAVX)
      return CSR_64_AllRegs_AVX_SaveList;
    return CSR_64_AllRegs_SaveList;
  case CallingConv::PreserveMost:
    return CSR_64_RT_MostRegs_SaveList;
  case CallingConv::PreserveAll:
    if (HasAVX)
      return CSR_64_RT_AllRegs_AVX_SaveList;
    return CSR_64_RT_AllRegs_SaveList;
  case CallingConv::CXX_FAST_TLS:
    if (Is64Bit)
      return MF->getInfo<X86MachineFunctionInfo>()->isSplitCSR() ?
             CSR_64_CXX_TLS_Darwin_PE_SaveList : CSR_64_TLS_Darwin_SaveList;
    break;
  case CallingConv::Intel_OCL_BI: {
    if (HasAVX512 && IsWin64)
      return CSR_Win64_Intel_OCL_BI_AVX512_SaveList;
    if (HasAVX512 && Is64Bit)
      return CSR_64_Intel_OCL_BI_AVX512_SaveList;
    if (HasAVX && IsWin64)
      return CSR_Win64_Intel_OCL_BI_AVX_SaveList;
    if (HasAVX && Is64Bit)
      return CSR_64_Intel_OCL_BI_AVX_SaveList;
    if (!HasAVX && !IsWin64 && Is64Bit)
      return CSR_64_Intel_OCL_BI_SaveList;
    break;
  }
  case CallingConv::HHVM:
    return CSR_64_HHVM_SaveList;
  case CallingConv::X86_RegCall:
    if (Is64Bit) {
      if (IsWin64) {
        return (HasSSE ? CSR_Win64_RegCall_SaveList :
                         CSR_Win64_RegCall_NoSSE_SaveList);
      } else {
        return (HasSSE ? CSR_SysV64_RegCall_SaveList :
                         CSR_SysV64_RegCall_NoSSE_SaveList);
      }
    } else {
      return (HasSSE ? CSR_32_RegCall_SaveList :
                       CSR_32_RegCall_NoSSE_SaveList);
    }
  case CallingConv::CFGuard_Check:
    assert(!Is64Bit && "CFGuard check mechanism only used on 32-bit X86");
    return (HasSSE ? CSR_Win32_CFGuard_Check_SaveList
                   : CSR_Win32_CFGuard_Check_NoSSE_SaveList);
  case CallingConv::Cold:
    if (Is64Bit)
      return CSR_64_MostRegs_SaveList;
    break;
  case CallingConv::Win64:
    if (!HasSSE)
      return CSR_Win64_NoSSE_SaveList;
    return CSR_Win64_SaveList;
  case CallingConv::X86_64_SysV:
    if (CallsEHReturn)
      return CSR_64EHRet_SaveList;
    return CSR_64_SaveList;
  case CallingConv::X86_INTR:
    if (Is64Bit) {
      if (HasAVX512)
        return CSR_64_AllRegs_AVX512_SaveList;
      if (HasAVX)
        return CSR_64_AllRegs_AVX_SaveList;
      if (HasSSE)
        return CSR_64_AllRegs_SaveList;
      return CSR_64_AllRegs_NoSSE_SaveList;
    } else {
      if (HasAVX512)
        return CSR_32_AllRegs_AVX512_SaveList;
      if (HasAVX)
        return CSR_32_AllRegs_AVX_SaveList;
      if (HasSSE)
        return CSR_32_AllRegs_SSE_SaveList;
      return CSR_32_AllRegs_SaveList;
    }
  default:
    break;
  }

  if (Is64Bit) {
    bool IsSwiftCC = Subtarget.getTargetLowering()->supportSwiftError() &&
                     F.getAttributes().hasAttrSomewhere(Attribute::SwiftError);
    if (IsSwiftCC)
      return IsWin64 ? CSR_Win64_SwiftError_SaveList
                     : CSR_64_SwiftError_SaveList;

    if (IsWin64)
      return HasSSE ? CSR_Win64_SaveList : CSR_Win64_NoSSE_SaveList;
    if (CallsEHReturn)
      return CSR_64EHRet_SaveList;
    return CSR_64_SaveList;
  }

  return CallsEHReturn ? CSR_32EHRet_SaveList : CSR_32_SaveList;
}
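
// Worked example (CSR list contents assumed from the SysV ABI, not verified
// here): a plain C function on x86-64 Linux takes the default path at the
// bottom of the switch and gets CSR_64_SaveList, i.e. the callee-saved set
// RBX, RBP and R12-R15. Marking the same function with
// __attribute__((no_caller_saved_registers)) reroutes it through the
// X86_INTR lists, which preserve essentially all registers.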

const MCPhysReg *X86RegisterInfo::getCalleeSavedRegsViaCopy(
    const MachineFunction *MF) const {
  assert(MF && "Invalid MachineFunction pointer.");
  if (MF->getFunction().getCallingConv() == CallingConv::CXX_FAST_TLS &&
      MF->getInfo<X86MachineFunctionInfo>()->isSplitCSR())
    return CSR_64_CXX_TLS_Darwin_ViaCopy_SaveList;
  return nullptr;
}

const uint32_t *
X86RegisterInfo::getCallPreservedMask(const MachineFunction &MF,
                                      CallingConv::ID CC) const {
  const X86Subtarget &Subtarget = MF.getSubtarget<X86Subtarget>();
  bool HasSSE = Subtarget.hasSSE1();
  bool HasAVX = Subtarget.hasAVX();
  bool HasAVX512 = Subtarget.hasAVX512();

  switch (CC) {
  case CallingConv::GHC:
  case CallingConv::HiPE:
    return CSR_NoRegs_RegMask;
  case CallingConv::AnyReg:
    if (HasAVX)
      return CSR_64_AllRegs_AVX_RegMask;
    return CSR_64_AllRegs_RegMask;
  case CallingConv::PreserveMost:
    return CSR_64_RT_MostRegs_RegMask;
  case CallingConv::PreserveAll:
    if (HasAVX)
      return CSR_64_RT_AllRegs_AVX_RegMask;
    return CSR_64_RT_AllRegs_RegMask;
  case CallingConv::CXX_FAST_TLS:
    if (Is64Bit)
      return CSR_64_TLS_Darwin_RegMask;
    break;
  case CallingConv::Intel_OCL_BI: {
    if (HasAVX512 && IsWin64)
      return CSR_Win64_Intel_OCL_BI_AVX512_RegMask;
    if (HasAVX512 && Is64Bit)
      return CSR_64_Intel_OCL_BI_AVX512_RegMask;
    if (HasAVX && IsWin64)
      return CSR_Win64_Intel_OCL_BI_AVX_RegMask;
    if (HasAVX && Is64Bit)
      return CSR_64_Intel_OCL_BI_AVX_RegMask;
    if (!HasAVX && !IsWin64 && Is64Bit)
      return CSR_64_Intel_OCL_BI_RegMask;
    break;
  }
  case CallingConv::HHVM:
    return CSR_64_HHVM_RegMask;
  case CallingConv::X86_RegCall:
    if (Is64Bit) {
      if (IsWin64) {
        return (HasSSE ? CSR_Win64_RegCall_RegMask :
                         CSR_Win64_RegCall_NoSSE_RegMask);
      } else {
        return (HasSSE ? CSR_SysV64_RegCall_RegMask :
                         CSR_SysV64_RegCall_NoSSE_RegMask);
      }
    } else {
      return (HasSSE ? CSR_32_RegCall_RegMask :
                       CSR_32_RegCall_NoSSE_RegMask);
    }
  case CallingConv::CFGuard_Check:
    assert(!Is64Bit && "CFGuard check mechanism only used on 32-bit X86");
    return (HasSSE ? CSR_Win32_CFGuard_Check_RegMask
                   : CSR_Win32_CFGuard_Check_NoSSE_RegMask);
  case CallingConv::Cold:
    if (Is64Bit)
      return CSR_64_MostRegs_RegMask;
    break;
  case CallingConv::Win64:
    return CSR_Win64_RegMask;
  case CallingConv::X86_64_SysV:
    return CSR_64_RegMask;
  case CallingConv::X86_INTR:
    if (Is64Bit) {
      if (HasAVX512)
        return CSR_64_AllRegs_AVX512_RegMask;
      if (HasAVX)
        return CSR_64_AllRegs_AVX_RegMask;
      if (HasSSE)
        return CSR_64_AllRegs_RegMask;
      return CSR_64_AllRegs_NoSSE_RegMask;
    } else {
      if (HasAVX512)
        return CSR_32_AllRegs_AVX512_RegMask;
      if (HasAVX)
        return CSR_32_AllRegs_AVX_RegMask;
      if (HasSSE)
        return CSR_32_AllRegs_SSE_RegMask;
      return CSR_32_AllRegs_RegMask;
    }
  default:
    break;
  }

  // Unlike getCalleeSavedRegs(), we don't have MMI so we can't check
  // callsEHReturn().
  if (Is64Bit) {
    const Function &F = MF.getFunction();
    bool IsSwiftCC = Subtarget.getTargetLowering()->supportSwiftError() &&
                     F.getAttributes().hasAttrSomewhere(Attribute::SwiftError);
    if (IsSwiftCC)
      return IsWin64 ? CSR_Win64_SwiftError_RegMask : CSR_64_SwiftError_RegMask;
    return IsWin64 ? CSR_Win64_RegMask : CSR_64_RegMask;
  }

  return CSR_32_RegMask;
}
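
// Register-mask layout, for reference (general MachineOperand semantics, not
// specific to X86): bit Reg of the mask is set when Reg is PRESERVED across
// the call, so a clobber test looks like
//
//   bool Clobbered = !(Mask[Reg / 32] & (1u << (Reg % 32)));
//
// which is what MachineOperand::clobbersPhysReg() computes.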

const uint32_t*
X86RegisterInfo::getNoPreservedMask() const {
  return CSR_NoRegs_RegMask;
}

const uint32_t *X86RegisterInfo::getDarwinTLSCallPreservedMask() const {
  return CSR_64_TLS_Darwin_RegMask;
}

BitVector X86RegisterInfo::getReservedRegs(const MachineFunction &MF) const {
  BitVector Reserved(getNumRegs());
  const X86FrameLowering *TFI = getFrameLowering(MF);

  // Set the floating point control register as reserved.
  Reserved.set(X86::FPCW);

  // Set the floating point status register as reserved.
  Reserved.set(X86::FPSW);

  // Set the SIMD floating point control register as reserved.
  Reserved.set(X86::MXCSR);

  // Set the stack-pointer register and its aliases as reserved.
  for (const MCPhysReg &SubReg : subregs_inclusive(X86::RSP))
    Reserved.set(SubReg);

  // Set the Shadow Stack Pointer as reserved.
  Reserved.set(X86::SSP);

  // Set the instruction pointer register and its aliases as reserved.
  for (const MCPhysReg &SubReg : subregs_inclusive(X86::RIP))
    Reserved.set(SubReg);

  // Set the frame-pointer register and its aliases as reserved if needed.
  if (TFI->hasFP(MF)) {
    for (const MCPhysReg &SubReg : subregs_inclusive(X86::RBP))
      Reserved.set(SubReg);
  }

  // Set the base-pointer register and its aliases as reserved if needed.
  if (hasBasePointer(MF)) {
    CallingConv::ID CC = MF.getFunction().getCallingConv();
    const uint32_t *RegMask = getCallPreservedMask(MF, CC);
    if (MachineOperand::clobbersPhysReg(RegMask, getBaseRegister()))
      report_fatal_error(
          "Stack realignment in presence of dynamic allocas is not supported "
          "with this calling convention.");

    Register BasePtr = getX86SubSuperRegister(getBaseRegister(), 64);
    for (const MCPhysReg &SubReg : subregs_inclusive(BasePtr))
      Reserved.set(SubReg);
  }

  // Mark the segment registers as reserved.
  Reserved.set(X86::CS);
  Reserved.set(X86::SS);
  Reserved.set(X86::DS);
  Reserved.set(X86::ES);
  Reserved.set(X86::FS);
  Reserved.set(X86::GS);

  // Mark the floating point stack registers as reserved.
  for (unsigned n = 0; n != 8; ++n)
    Reserved.set(X86::ST0 + n);

  // Reserve the registers that only exist in 64-bit mode.
  if (!Is64Bit) {
    // These 8-bit registers are part of the x86-64 extension even though
    // their super-registers are the old 32-bit registers.
    Reserved.set(X86::SIL);
    Reserved.set(X86::DIL);
    Reserved.set(X86::BPL);
    Reserved.set(X86::SPL);
    Reserved.set(X86::SIH);
    Reserved.set(X86::DIH);
    Reserved.set(X86::BPH);
    Reserved.set(X86::SPH);

    for (unsigned n = 0; n != 8; ++n) {
      // R8, R9, ...
      for (MCRegAliasIterator AI(X86::R8 + n, this, true); AI.isValid(); ++AI)
        Reserved.set(*AI);

      // XMM8, XMM9, ...
      for (MCRegAliasIterator AI(X86::XMM8 + n, this, true); AI.isValid(); ++AI)
        Reserved.set(*AI);
    }
  }
  if (!Is64Bit || !MF.getSubtarget<X86Subtarget>().hasAVX512()) {
    for (unsigned n = 16; n != 32; ++n) {
      for (MCRegAliasIterator AI(X86::XMM0 + n, this, true); AI.isValid(); ++AI)
        Reserved.set(*AI);
    }
  }

  assert(checkAllSuperRegsMarked(Reserved,
                                 {X86::SIL, X86::DIL, X86::BPL, X86::SPL,
                                  X86::SIH, X86::DIH, X86::BPH, X86::SPH}));
  return Reserved;
}
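
// Quick sanity sketch of the reservations above (hypothetical usage, not
// part of this file): once the reserved set is frozen,
//
//   const MachineRegisterInfo &MRI = MF.getRegInfo();
//   assert(MRI.isReserved(X86::ESP) && MRI.isReserved(X86::RIP));
//
// and on a 32-bit subtarget X86::R8 through X86::R15 and XMM8 and above are
// reserved as well, so the allocator never hands them out.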

void X86RegisterInfo::adjustStackMapLiveOutMask(uint32_t *Mask) const {
  // Check if the EFLAGS register is marked as live-out. This shouldn't happen,
  // because the calling convention defines the EFLAGS register as NOT
  // preserved.
  //
  // Unfortunately, EFLAGS can show up as live-out after branch folding. Add
  // an assert to track this, and clear the register afterwards to avoid
  // unnecessary crashes during release builds.
  assert(!(Mask[X86::EFLAGS / 32] & (1U << (X86::EFLAGS % 32))) &&
         "EFLAGS are not live-out from a patchpoint.");

  // Also clean other registers that don't need preserving (IP).
  for (auto Reg : {X86::EFLAGS, X86::RIP, X86::EIP, X86::IP})
    Mask[Reg / 32] &= ~(1U << (Reg % 32));
}

//===----------------------------------------------------------------------===//
// Stack Frame Processing methods
//===----------------------------------------------------------------------===//

static bool CantUseSP(const MachineFrameInfo &MFI) {
  return MFI.hasVarSizedObjects() || MFI.hasOpaqueSPAdjustment();
}

bool X86RegisterInfo::hasBasePointer(const MachineFunction &MF) const {
  const X86MachineFunctionInfo *X86FI = MF.getInfo<X86MachineFunctionInfo>();
  if (X86FI->hasPreallocatedCall())
    return true;

  const MachineFrameInfo &MFI = MF.getFrameInfo();

  if (!EnableBasePointer)
    return false;

  // When we need stack realignment, we can't address the stack from the frame
  // pointer. When we have dynamic allocas or stack-adjusting inline asm, we
  // can't address variables from the stack pointer. MS inline asm can
  // reference locals while also adjusting the stack pointer. When we can't
  // use both the SP and the FP, we need a separate base pointer register.
  bool CantUseFP = needsStackRealignment(MF);
  return CantUseFP && CantUseSP(MFI);
}

bool X86RegisterInfo::canRealignStack(const MachineFunction &MF) const {
  if (!TargetRegisterInfo::canRealignStack(MF))
    return false;

  const MachineFrameInfo &MFI = MF.getFrameInfo();
  const MachineRegisterInfo *MRI = &MF.getRegInfo();

  // Stack realignment requires a frame pointer. If we already started
  // register allocation with frame pointer elimination, it is too late now.
  if (!MRI->canReserveReg(FramePtr))
    return false;

  // If a base pointer is necessary, check that it isn't too late to reserve
  // it.
  if (CantUseSP(MFI))
    return MRI->canReserveReg(BasePtr);
  return true;
}
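
// Illustrative IR that needs a base pointer (a sketch; exact codegen is not
// asserted here): a function that both over-aligns a local and makes a
// dynamic alloca, e.g.
//
//   %big = alloca i8, align 64   ; forces stack realignment (no FP addressing)
//   %dyn = alloca i8, i64 %n     ; variable-sized object (no SP addressing)
//
// can address neither class of object from FP nor SP alone, so
// hasBasePointer() returns true and ESI/RBX (see the constructor) carries
// the base address.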

// tryOptimizeLEAtoMOV - helper function that tries to replace a LEA instruction
// of the form 'lea (%esp), %ebx' --> 'mov %esp, %ebx'.
// TODO: In this case we should be really trying first to entirely eliminate
// this instruction which is a plain copy.
static bool tryOptimizeLEAtoMOV(MachineBasicBlock::iterator II) {
  MachineInstr &MI = *II;
  unsigned Opc = II->getOpcode();
  // Check if this is a LEA of the form 'lea (%esp), %ebx', i.e. scale 1,
  // no index register, zero displacement, and no segment register.
  if ((Opc != X86::LEA32r && Opc != X86::LEA64r && Opc != X86::LEA64_32r) ||
      MI.getOperand(2).getImm() != 1 ||
      MI.getOperand(3).getReg() != X86::NoRegister ||
      MI.getOperand(4).getImm() != 0 ||
      MI.getOperand(5).getReg() != X86::NoRegister)
    return false;
  Register BasePtr = MI.getOperand(1).getReg();
  // In X32 mode, ensure the base-pointer is a 32-bit operand, so the LEA will
  // be replaced with a 32-bit operand MOV, which implicitly zeroes the upper
  // 32 bits of the 64-bit super-register.
  if (Opc == X86::LEA64_32r)
    BasePtr = getX86SubSuperRegister(BasePtr, 32);
  Register NewDestReg = MI.getOperand(0).getReg();
  const X86InstrInfo *TII =
      MI.getParent()->getParent()->getSubtarget<X86Subtarget>().getInstrInfo();
  TII->copyPhysReg(*MI.getParent(), II, MI.getDebugLoc(), NewDestReg, BasePtr,
                   MI.getOperand(1).isKill());
  MI.eraseFromParent();
  return true;
}

static bool isFuncletReturnInstr(MachineInstr &MI) {
  switch (MI.getOpcode()) {
  case X86::CATCHRET:
  case X86::CLEANUPRET:
    return true;
  default:
    return false;
  }
  llvm_unreachable("impossible");
}

void
X86RegisterInfo::eliminateFrameIndex(MachineBasicBlock::iterator II,
                                     int SPAdj, unsigned FIOperandNum,
                                     RegScavenger *RS) const {
  MachineInstr &MI = *II;
  MachineBasicBlock &MBB = *MI.getParent();
  MachineFunction &MF = *MBB.getParent();
  MachineBasicBlock::iterator MBBI = MBB.getFirstTerminator();
  bool IsEHFuncletEpilogue = MBBI == MBB.end() ? false
                                               : isFuncletReturnInstr(*MBBI);
  const X86FrameLowering *TFI = getFrameLowering(MF);
  int FrameIndex = MI.getOperand(FIOperandNum).getIndex();

  // Determine base register and offset.
  int FIOffset;
  Register BasePtr;
  if (MI.isReturn()) {
    assert((!needsStackRealignment(MF) ||
            MF.getFrameInfo().isFixedObjectIndex(FrameIndex)) &&
           "Return instruction can only reference SP relative frame objects");
    FIOffset =
        TFI->getFrameIndexReferenceSP(MF, FrameIndex, BasePtr, 0).getFixed();
  } else if (TFI->Is64Bit && (MBB.isEHFuncletEntry() || IsEHFuncletEpilogue)) {
    FIOffset = TFI->getWin64EHFrameIndexRef(MF, FrameIndex, BasePtr);
  } else {
    FIOffset = TFI->getFrameIndexReference(MF, FrameIndex, BasePtr).getFixed();
  }

  // LOCAL_ESCAPE uses a single offset, with no register. It only works in the
  // simple FP case, and doesn't work with stack realignment. On 32-bit, the
  // offset is from the traditional base pointer location. On 64-bit, the
  // offset is from the SP at the end of the prologue, not the FP location.
  // This matches the behavior of llvm.frameaddress.
  unsigned Opc = MI.getOpcode();
  if (Opc == TargetOpcode::LOCAL_ESCAPE) {
    MachineOperand &FI = MI.getOperand(FIOperandNum);
    FI.ChangeToImmediate(FIOffset);
    return;
  }

  // For LEA64_32r, when BasePtr is 32 bits (X32) we can use the full-size
  // 64-bit register as the source operand: the semantics are the same and the
  // destination is 32 bits. This saves one byte per LEA since the 0x67
  // address-size prefix is avoided. Don't change BasePtr since it is used
  // later for stack adjustment.
  Register MachineBasePtr = BasePtr;
  if (Opc == X86::LEA64_32r && X86::GR32RegClass.contains(BasePtr))
    MachineBasePtr = getX86SubSuperRegister(BasePtr, 64);

  // This must be part of a five operand memory reference. Replace the
  // FrameIndex with the base register. Add an offset to the offset.
  MI.getOperand(FIOperandNum).ChangeToRegister(MachineBasePtr, false);

  if (BasePtr == StackPtr)
    FIOffset += SPAdj;

  // The frame index format for stackmaps and patchpoints is different from the
  // X86 format. It only has a FI and an offset.
  if (Opc == TargetOpcode::STACKMAP || Opc == TargetOpcode::PATCHPOINT) {
    assert(BasePtr == FramePtr && "Expected the FP as base register");
    int64_t Offset = MI.getOperand(FIOperandNum + 1).getImm() + FIOffset;
    MI.getOperand(FIOperandNum + 1).ChangeToImmediate(Offset);
    return;
  }

  if (MI.getOperand(FIOperandNum+3).isImm()) {
    // Offset is a 32-bit integer.
    int Imm = (int)(MI.getOperand(FIOperandNum + 3).getImm());
    int Offset = FIOffset + Imm;
    assert((!Is64Bit || isInt<32>((long long)FIOffset + Imm)) &&
           "Requesting 64-bit offset in 32-bit immediate!");
    if (Offset != 0 || !tryOptimizeLEAtoMOV(II))
      MI.getOperand(FIOperandNum + 3).ChangeToImmediate(Offset);
  } else {
    // Offset is symbolic. This is extremely rare.
    uint64_t Offset = FIOffset +
                      (uint64_t)MI.getOperand(FIOperandNum+3).getOffset();
    MI.getOperand(FIOperandNum + 3).setOffset(Offset);
  }
}
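
// Example of the rewrite performed above (schematic MIR; register and offset
// are illustrative only): with a frame pointer, a pre-elimination reference
//
//   $eax = MOV32rm %stack.0, 1, $noreg, 0, $noreg
//
// becomes something like
//
//   $eax = MOV32rm $ebp, 1, $noreg, -8, $noreg   ; i.e. mov -8(%ebp), %eax
//
// and a resulting zero-offset LEA of the stack pointer is turned into a
// plain register copy by tryOptimizeLEAtoMOV().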

unsigned X86RegisterInfo::findDeadCallerSavedReg(
    MachineBasicBlock &MBB, MachineBasicBlock::iterator &MBBI) const {
  const MachineFunction *MF = MBB.getParent();
  if (MF->callsEHReturn())
    return 0;

  const TargetRegisterClass &AvailableRegs = *getGPRsForTailCall(*MF);

  if (MBBI == MBB.end())
    return 0;

  switch (MBBI->getOpcode()) {
  default:
    return 0;
  case TargetOpcode::PATCHABLE_RET:
  case X86::RET:
  case X86::RETL:
  case X86::RETQ:
  case X86::RETIL:
  case X86::RETIQ:
  case X86::TCRETURNdi:
  case X86::TCRETURNri:
  case X86::TCRETURNmi:
  case X86::TCRETURNdi64:
  case X86::TCRETURNri64:
  case X86::TCRETURNmi64:
  case X86::EH_RETURN:
  case X86::EH_RETURN64: {
    SmallSet<uint16_t, 8> Uses;
    for (unsigned I = 0, E = MBBI->getNumOperands(); I != E; ++I) {
      MachineOperand &MO = MBBI->getOperand(I);
      if (!MO.isReg() || MO.isDef())
        continue;
      Register Reg = MO.getReg();
      if (!Reg)
        continue;
      for (MCRegAliasIterator AI(Reg, this, true); AI.isValid(); ++AI)
        Uses.insert(*AI);
    }

    for (auto CS : AvailableRegs)
      if (!Uses.count(CS) && CS != X86::RIP && CS != X86::RSP &&
          CS != X86::ESP)
        return CS;
  }
  }

  return 0;
}

Register X86RegisterInfo::getFrameRegister(const MachineFunction &MF) const {
  const X86FrameLowering *TFI = getFrameLowering(MF);
  return TFI->hasFP(MF) ? FramePtr : StackPtr;
}
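
// Usage note (a sketch of how callers consume this, not asserted here):
// frame-index elimination and llvm.frameaddress lowering both go through
// getFrameRegister(), so a function with a frame pointer resolves locals
// against RBP/EBP, while an FP-eliminated function falls back to RSP/ESP.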

unsigned
X86RegisterInfo::getPtrSizedFrameRegister(const MachineFunction &MF) const {
  const X86Subtarget &Subtarget = MF.getSubtarget<X86Subtarget>();
  Register FrameReg = getFrameRegister(MF);
  if (Subtarget.isTarget64BitILP32())
    FrameReg = getX86SubSuperRegister(FrameReg, 32);
  return FrameReg;
}

unsigned
X86RegisterInfo::getPtrSizedStackRegister(const MachineFunction &MF) const {
  const X86Subtarget &Subtarget = MF.getSubtarget<X86Subtarget>();
  Register StackReg = getStackRegister();
  if (Subtarget.isTarget64BitILP32())
    StackReg = getX86SubSuperRegister(StackReg, 32);
  return StackReg;
}

static ShapeT getTileShape(Register VirtReg, VirtRegMap *VRM,
                           const MachineRegisterInfo *MRI) {
  if (VRM->hasShape(VirtReg))
    return VRM->getShape(VirtReg);

  const MachineOperand &Def = *MRI->def_begin(VirtReg);
  MachineInstr *MI = const_cast<MachineInstr *>(Def.getParent());
  unsigned OpCode = MI->getOpcode();
  switch (OpCode) {
  default:
    llvm_unreachable("Unexpected machine instruction on tile register!");
    break;
  // We only collect the tile shape that is defined.
  case X86::PTILELOADDV:
  case X86::PTDPBSSDV:
  case X86::PTILEZEROV:
    MachineOperand &MO1 = MI->getOperand(1);
    MachineOperand &MO2 = MI->getOperand(2);
    ShapeT Shape(&MO1, &MO2, MRI);
    VRM->assignVirt2Shape(VirtReg, Shape);
    return Shape;
  }
}

bool X86RegisterInfo::getRegAllocationHints(Register VirtReg,
                                            ArrayRef<MCPhysReg> Order,
                                            SmallVectorImpl<MCPhysReg> &Hints,
                                            const MachineFunction &MF,
                                            const VirtRegMap *VRM,
                                            const LiveRegMatrix *Matrix) const {
  const MachineRegisterInfo *MRI = &MF.getRegInfo();
  const TargetRegisterClass &RC = *MRI->getRegClass(VirtReg);
  bool BaseImplRetVal = TargetRegisterInfo::getRegAllocationHints(
      VirtReg, Order, Hints, MF, VRM, Matrix);

  if (RC.getID() != X86::TILERegClassID)
    return BaseImplRetVal;

  ShapeT VirtShape = getTileShape(VirtReg, const_cast<VirtRegMap *>(VRM), MRI);
  auto AddHint = [&](MCPhysReg PhysReg) {
    Register VReg = Matrix->getOneVReg(PhysReg);
    if (VReg == MCRegister::NoRegister) { // Not allocated yet
      Hints.push_back(PhysReg);
      return;
    }
    ShapeT PhysShape = getTileShape(VReg, const_cast<VirtRegMap *>(VRM), MRI);
    if (PhysShape == VirtShape)
      Hints.push_back(PhysReg);
  };

  SmallSet<MCPhysReg, 4> CopyHints;
  CopyHints.insert(Hints.begin(), Hints.end());
  Hints.clear();
  for (auto Hint : CopyHints) {
    if (RC.contains(Hint) && !MRI->isReserved(Hint))
      AddHint(Hint);
  }
  for (MCPhysReg PhysReg : Order) {
    if (!CopyHints.count(PhysReg) && RC.contains(PhysReg) &&
        !MRI->isReserved(PhysReg))
      AddHint(PhysReg);
  }

#define DEBUG_TYPE "tile-hint"
  LLVM_DEBUG({
    dbgs() << "Hints for virtual register " << format_hex(VirtReg, 8) << "\n";
    for (auto Hint : Hints) {
      dbgs() << "tmm" << Hint << ",";
    }
    dbgs() << "\n";
  });
#undef DEBUG_TYPE

  return true;
}