//===-- X86RegisterInfo.cpp - X86 Register Information --------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file contains the X86 implementation of the TargetRegisterInfo class.
// This file is responsible for the frame pointer elimination optimization
// on X86.
//
//===----------------------------------------------------------------------===//

#include "X86RegisterInfo.h"
#include "X86FrameLowering.h"
#include "X86MachineFunctionInfo.h"
#include "X86Subtarget.h"
#include "llvm/ADT/BitVector.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/CodeGen/LiveRegMatrix.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/TargetFrameLowering.h"
#include "llvm/CodeGen/TargetInstrInfo.h"
#include "llvm/CodeGen/TileShapeInfo.h"
#include "llvm/CodeGen/VirtRegMap.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/Type.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetOptions.h"

using namespace llvm;

#define GET_REGINFO_TARGET_DESC
#include "X86GenRegisterInfo.inc"

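// Hidden option; passing -x86-use-base-pointer=false to llc disables base
// pointer usage entirely, e.g. when debugging frame lowering.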
static cl::opt<bool>
    EnableBasePointer("x86-use-base-pointer", cl::Hidden, cl::init(true),
                      cl::desc("Enable use of a base pointer for complex stack frames"));

X86RegisterInfo::X86RegisterInfo(const Triple &TT)
    : X86GenRegisterInfo((TT.isArch64Bit() ? X86::RIP : X86::EIP),
                         X86_MC::getDwarfRegFlavour(TT, false),
                         X86_MC::getDwarfRegFlavour(TT, true),
                         (TT.isArch64Bit() ? X86::RIP : X86::EIP)) {
  X86_MC::initLLVMToSEHAndCVRegMapping(this);

  // Cache some information.
  Is64Bit = TT.isArch64Bit();
  IsWin64 = Is64Bit && TT.isOSWindows();

  // Use a callee-saved register as the base pointer. These registers must
  // not conflict with any ABI requirements. For example, in 32-bit PIC mode
  // the GOT pointer must be in EBX before function calls via the PLT.
  if (Is64Bit) {
    SlotSize = 8;
    // This matches the simplified 32-bit pointer code in the data layout
    // computation.
    // FIXME: Should use the data layout?
    bool Use64BitReg = !TT.isX32();
    StackPtr = Use64BitReg ? X86::RSP : X86::ESP;
    FramePtr = Use64BitReg ? X86::RBP : X86::EBP;
    BasePtr = Use64BitReg ? X86::RBX : X86::EBX;
  } else {
    SlotSize = 4;
    StackPtr = X86::ESP;
    FramePtr = X86::EBP;
    BasePtr = X86::ESI;
  }
}

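// Win64 SEH unwind descriptors identify registers by their hardware encoding,
// so the SEH register number is simply the register's encoding value.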
int
X86RegisterInfo::getSEHRegNum(unsigned i) const {
  return getEncodingValue(i);
}

const TargetRegisterClass *
X86RegisterInfo::getSubClassWithSubReg(const TargetRegisterClass *RC,
                                       unsigned Idx) const {
  // The sub_8bit sub-register index is more constrained in 32-bit mode.
  // It behaves just like the sub_8bit_hi index.
  if (!Is64Bit && Idx == X86::sub_8bit)
    Idx = X86::sub_8bit_hi;

  // Forward to TableGen's default version.
  return X86GenRegisterInfo::getSubClassWithSubReg(RC, Idx);
}

const TargetRegisterClass *
X86RegisterInfo::getMatchingSuperRegClass(const TargetRegisterClass *A,
                                          const TargetRegisterClass *B,
                                          unsigned SubIdx) const {
  // The sub_8bit sub-register index is more constrained in 32-bit mode.
  if (!Is64Bit && SubIdx == X86::sub_8bit) {
    A = X86GenRegisterInfo::getSubClassWithSubReg(A, X86::sub_8bit_hi);
    if (!A)
      return nullptr;
  }
  return X86GenRegisterInfo::getMatchingSuperRegClass(A, B, SubIdx);
}

const TargetRegisterClass *
X86RegisterInfo::getLargestLegalSuperClass(const TargetRegisterClass *RC,
                                           const MachineFunction &MF) const {
  // Don't allow super-classes of GR8_NOREX. This class is only used after
  // extracting sub_8bit_hi sub-registers. The H sub-registers cannot be copied
  // to the full GR8 register class in 64-bit mode, so we cannot allow the
  // register class inflation.
  //
  // The GR8_NOREX class is always used in a way that won't be constrained to a
  // sub-class, so sub-classes like GR8_ABCD_L are allowed to expand to the
  // full GR8 class.
  if (RC == &X86::GR8_NOREXRegClass)
    return RC;

  const X86Subtarget &Subtarget = MF.getSubtarget<X86Subtarget>();

  const TargetRegisterClass *Super = RC;
  TargetRegisterClass::sc_iterator I = RC->getSuperClasses();
  do {
    switch (Super->getID()) {
    case X86::FR32RegClassID:
    case X86::FR64RegClassID:
      // If AVX-512 isn't supported we should only inflate to these classes.
      if (!Subtarget.hasAVX512() &&
          getRegSizeInBits(*Super) == getRegSizeInBits(*RC))
        return Super;
      break;
    case X86::VR128RegClassID:
    case X86::VR256RegClassID:
      // If VLX isn't supported we should only inflate to these classes.
      if (!Subtarget.hasVLX() &&
          getRegSizeInBits(*Super) == getRegSizeInBits(*RC))
        return Super;
      break;
    case X86::VR128XRegClassID:
    case X86::VR256XRegClassID:
      // If VLX isn't supported we shouldn't inflate to these classes.
      if (Subtarget.hasVLX() &&
          getRegSizeInBits(*Super) == getRegSizeInBits(*RC))
        return Super;
      break;
    case X86::FR32XRegClassID:
    case X86::FR64XRegClassID:
      // If AVX-512 isn't supported we shouldn't inflate to these classes.
      if (Subtarget.hasAVX512() &&
          getRegSizeInBits(*Super) == getRegSizeInBits(*RC))
        return Super;
      break;
    case X86::GR8RegClassID:
    case X86::GR16RegClassID:
    case X86::GR32RegClassID:
    case X86::GR64RegClassID:
    case X86::GR8_NOREX2RegClassID:
    case X86::GR16_NOREX2RegClassID:
    case X86::GR32_NOREX2RegClassID:
    case X86::GR64_NOREX2RegClassID:
    case X86::RFP32RegClassID:
    case X86::RFP64RegClassID:
    case X86::RFP80RegClassID:
    case X86::VR512_0_15RegClassID:
    case X86::VR512RegClassID:
      // Don't return a super-class that would shrink the spill size.
      // That can happen with the vector and float classes.
      if (getRegSizeInBits(*Super) == getRegSizeInBits(*RC))
        return Super;
    }
    Super = *I++;
  } while (Super);
  return RC;
}

const TargetRegisterClass *
X86RegisterInfo::getPointerRegClass(const MachineFunction &MF,
                                    unsigned Kind) const {
  const X86Subtarget &Subtarget = MF.getSubtarget<X86Subtarget>();
  switch (Kind) {
  default: llvm_unreachable("Unexpected Kind in getPointerRegClass!");
  case 0: // Normal GPRs.
    if (Subtarget.isTarget64BitLP64())
      return &X86::GR64RegClass;
    // If the target is 64-bit but we have been told to use 32-bit addresses,
    // we can still use a 64-bit register as long as we know the high bits
    // are zero.
    // Reflect that in the returned register class.
    if (Is64Bit) {
      // When the target also allows a 64-bit frame pointer and we have a
      // frame, it is fine to use it for the address accesses as well.
      const X86FrameLowering *TFI = getFrameLowering(MF);
      return TFI->hasFP(MF) && TFI->Uses64BitFramePtr
                 ? &X86::LOW32_ADDR_ACCESS_RBPRegClass
                 : &X86::LOW32_ADDR_ACCESSRegClass;
    }
    return &X86::GR32RegClass;
  case 1: // Normal GPRs except the stack pointer (for encoding reasons).
    if (Subtarget.isTarget64BitLP64())
      return &X86::GR64_NOSPRegClass;
    // NOSP does not contain RIP, so no special case here.
    return &X86::GR32_NOSPRegClass;
  case 2: // NOREX GPRs.
    if (Subtarget.isTarget64BitLP64())
      return &X86::GR64_NOREXRegClass;
    return &X86::GR32_NOREXRegClass;
  case 3: // NOREX GPRs except the stack pointer (for encoding reasons).
    if (Subtarget.isTarget64BitLP64())
      return &X86::GR64_NOREX_NOSPRegClass;
    // NOSP does not contain RIP, so no special case here.
    return &X86::GR32_NOREX_NOSPRegClass;
  case 4: // Available for tailcall (not callee-saved GPRs).
    return getGPRsForTailCall(MF);
  }
}

bool X86RegisterInfo::shouldRewriteCopySrc(const TargetRegisterClass *DefRC,
                                           unsigned DefSubReg,
                                           const TargetRegisterClass *SrcRC,
                                           unsigned SrcSubReg) const {
  // Prevent rewriting a copy where the destination size is larger than the
  // input size. See PR41619.
  // FIXME: Should this be factored into the base implementation somehow?
  if (DefRC->hasSuperClassEq(&X86::GR64RegClass) && DefSubReg == 0 &&
      SrcRC->hasSuperClassEq(&X86::GR64RegClass) && SrcSubReg == X86::sub_32bit)
    return false;

  return TargetRegisterInfo::shouldRewriteCopySrc(DefRC, DefSubReg,
                                                  SrcRC, SrcSubReg);
}

const TargetRegisterClass *
X86RegisterInfo::getGPRsForTailCall(const MachineFunction &MF) const {
  const Function &F = MF.getFunction();
  if (IsWin64 || (F.getCallingConv() == CallingConv::Win64))
    return &X86::GR64_TCW64RegClass;
  else if (Is64Bit)
    return &X86::GR64_TCRegClass;

  bool hasHipeCC = (F.getCallingConv() == CallingConv::HiPE);
  if (hasHipeCC)
    return &X86::GR32RegClass;
  return &X86::GR32_TCRegClass;
}

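// EFLAGS cannot be copied directly between registers; a copy has to be
// materialized through a general-purpose register, so cross-class copies of
// CCR use the widest available GPR class.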
const TargetRegisterClass *
X86RegisterInfo::getCrossCopyRegClass(const TargetRegisterClass *RC) const {
  if (RC == &X86::CCRRegClass) {
    if (Is64Bit)
      return &X86::GR64RegClass;
    else
      return &X86::GR32RegClass;
  }
  return RC;
}

unsigned
X86RegisterInfo::getRegPressureLimit(const TargetRegisterClass *RC,
                                     MachineFunction &MF) const {
  const X86FrameLowering *TFI = getFrameLowering(MF);

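  // The limits below are rough heuristics for how many registers of each
  // class remain available for allocation; reserving a frame pointer costs
  // one GPR.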
  unsigned FPDiff = TFI->hasFP(MF) ? 1 : 0;
  switch (RC->getID()) {
  default:
    return 0;
  case X86::GR32RegClassID:
    return 4 - FPDiff;
  case X86::GR64RegClassID:
    return 12 - FPDiff;
  case X86::VR128RegClassID:
    return Is64Bit ? 10 : 4;
  case X86::VR64RegClassID:
    return 4;
  }
}

const MCPhysReg *
X86RegisterInfo::getCalleeSavedRegs(const MachineFunction *MF) const {
  assert(MF && "MachineFunction required");

  const X86Subtarget &Subtarget = MF->getSubtarget<X86Subtarget>();
  const Function &F = MF->getFunction();
  bool HasSSE = Subtarget.hasSSE1();
  bool HasAVX = Subtarget.hasAVX();
  bool HasAVX512 = Subtarget.hasAVX512();
  bool CallsEHReturn = MF->callsEHReturn();

  CallingConv::ID CC = F.getCallingConv();

  // If the function has the no_caller_saved_registers attribute, switch to
  // the X86_INTR calling convention, which has the CSR list we need.
  if (MF->getFunction().hasFnAttribute("no_caller_saved_registers"))
    CC = CallingConv::X86_INTR;

  // If the attribute is specified, override the CSRs normally specified by
  // the calling convention and use the empty set instead.
  if (MF->getFunction().hasFnAttribute("no_callee_saved_registers"))
    return CSR_NoRegs_SaveList;

  switch (CC) {
  case CallingConv::GHC:
  case CallingConv::HiPE:
    return CSR_NoRegs_SaveList;
  case CallingConv::AnyReg:
    if (HasAVX)
      return CSR_64_AllRegs_AVX_SaveList;
    return CSR_64_AllRegs_SaveList;
  case CallingConv::PreserveMost:
    return IsWin64 ? CSR_Win64_RT_MostRegs_SaveList
                   : CSR_64_RT_MostRegs_SaveList;
  case CallingConv::PreserveAll:
    if (HasAVX)
      return CSR_64_RT_AllRegs_AVX_SaveList;
    return CSR_64_RT_AllRegs_SaveList;
  case CallingConv::CXX_FAST_TLS:
    if (Is64Bit)
      return MF->getInfo<X86MachineFunctionInfo>()->isSplitCSR() ?
             CSR_64_CXX_TLS_Darwin_PE_SaveList : CSR_64_TLS_Darwin_SaveList;
    break;
  case CallingConv::Intel_OCL_BI: {
    if (HasAVX512 && IsWin64)
      return CSR_Win64_Intel_OCL_BI_AVX512_SaveList;
    if (HasAVX512 && Is64Bit)
      return CSR_64_Intel_OCL_BI_AVX512_SaveList;
    if (HasAVX && IsWin64)
      return CSR_Win64_Intel_OCL_BI_AVX_SaveList;
    if (HasAVX && Is64Bit)
      return CSR_64_Intel_OCL_BI_AVX_SaveList;
    if (!HasAVX && !IsWin64 && Is64Bit)
      return CSR_64_Intel_OCL_BI_SaveList;
    break;
  }
  case CallingConv::X86_RegCall:
    if (Is64Bit) {
      if (IsWin64) {
        return (HasSSE ? CSR_Win64_RegCall_SaveList :
                         CSR_Win64_RegCall_NoSSE_SaveList);
      } else {
        return (HasSSE ? CSR_SysV64_RegCall_SaveList :
                         CSR_SysV64_RegCall_NoSSE_SaveList);
      }
    } else {
      return (HasSSE ? CSR_32_RegCall_SaveList :
                       CSR_32_RegCall_NoSSE_SaveList);
    }
  case CallingConv::CFGuard_Check:
    assert(!Is64Bit && "CFGuard check mechanism only used on 32-bit X86");
    return (HasSSE ? CSR_Win32_CFGuard_Check_SaveList
                   : CSR_Win32_CFGuard_Check_NoSSE_SaveList);
  case CallingConv::Cold:
    if (Is64Bit)
      return CSR_64_MostRegs_SaveList;
    break;
  case CallingConv::Win64:
    if (!HasSSE)
      return CSR_Win64_NoSSE_SaveList;
    return CSR_Win64_SaveList;
  case CallingConv::SwiftTail:
    if (!Is64Bit)
      return CSR_32_SaveList;
    return IsWin64 ? CSR_Win64_SwiftTail_SaveList : CSR_64_SwiftTail_SaveList;
  case CallingConv::X86_64_SysV:
    if (CallsEHReturn)
      return CSR_64EHRet_SaveList;
    return CSR_64_SaveList;
  case CallingConv::X86_INTR:
    if (Is64Bit) {
      if (HasAVX512)
        return CSR_64_AllRegs_AVX512_SaveList;
      if (HasAVX)
        return CSR_64_AllRegs_AVX_SaveList;
      if (HasSSE)
        return CSR_64_AllRegs_SaveList;
      return CSR_64_AllRegs_NoSSE_SaveList;
    } else {
      if (HasAVX512)
        return CSR_32_AllRegs_AVX512_SaveList;
      if (HasAVX)
        return CSR_32_AllRegs_AVX_SaveList;
      if (HasSSE)
        return CSR_32_AllRegs_SSE_SaveList;
      return CSR_32_AllRegs_SaveList;
    }
  default:
    break;
  }

  if (Is64Bit) {
    bool IsSwiftCC = Subtarget.getTargetLowering()->supportSwiftError() &&
                     F.getAttributes().hasAttrSomewhere(Attribute::SwiftError);
    if (IsSwiftCC)
      return IsWin64 ? CSR_Win64_SwiftError_SaveList
                     : CSR_64_SwiftError_SaveList;

    if (IsWin64)
      return HasSSE ? CSR_Win64_SaveList : CSR_Win64_NoSSE_SaveList;
    if (CallsEHReturn)
      return CSR_64EHRet_SaveList;
    return CSR_64_SaveList;
  }

  return CallsEHReturn ? CSR_32EHRet_SaveList : CSR_32_SaveList;
}

const MCPhysReg *X86RegisterInfo::getCalleeSavedRegsViaCopy(
    const MachineFunction *MF) const {
  assert(MF && "Invalid MachineFunction pointer.");
  if (MF->getFunction().getCallingConv() == CallingConv::CXX_FAST_TLS &&
      MF->getInfo<X86MachineFunctionInfo>()->isSplitCSR())
    return CSR_64_CXX_TLS_Darwin_ViaCopy_SaveList;
  return nullptr;
}

const uint32_t *
X86RegisterInfo::getCallPreservedMask(const MachineFunction &MF,
                                      CallingConv::ID CC) const {
  const X86Subtarget &Subtarget = MF.getSubtarget<X86Subtarget>();
  bool HasSSE = Subtarget.hasSSE1();
  bool HasAVX = Subtarget.hasAVX();
  bool HasAVX512 = Subtarget.hasAVX512();

  switch (CC) {
  case CallingConv::GHC:
  case CallingConv::HiPE:
    return CSR_NoRegs_RegMask;
  case CallingConv::AnyReg:
    if (HasAVX)
      return CSR_64_AllRegs_AVX_RegMask;
    return CSR_64_AllRegs_RegMask;
  case CallingConv::PreserveMost:
    return IsWin64 ? CSR_Win64_RT_MostRegs_RegMask : CSR_64_RT_MostRegs_RegMask;
  case CallingConv::PreserveAll:
    if (HasAVX)
      return CSR_64_RT_AllRegs_AVX_RegMask;
    return CSR_64_RT_AllRegs_RegMask;
  case CallingConv::CXX_FAST_TLS:
    if (Is64Bit)
      return CSR_64_TLS_Darwin_RegMask;
    break;
  case CallingConv::Intel_OCL_BI: {
    if (HasAVX512 && IsWin64)
      return CSR_Win64_Intel_OCL_BI_AVX512_RegMask;
    if (HasAVX512 && Is64Bit)
      return CSR_64_Intel_OCL_BI_AVX512_RegMask;
    if (HasAVX && IsWin64)
      return CSR_Win64_Intel_OCL_BI_AVX_RegMask;
    if (HasAVX && Is64Bit)
      return CSR_64_Intel_OCL_BI_AVX_RegMask;
    if (!HasAVX && !IsWin64 && Is64Bit)
      return CSR_64_Intel_OCL_BI_RegMask;
    break;
  }
  case CallingConv::X86_RegCall:
    if (Is64Bit) {
      if (IsWin64) {
        return (HasSSE ? CSR_Win64_RegCall_RegMask :
                         CSR_Win64_RegCall_NoSSE_RegMask);
      } else {
        return (HasSSE ? CSR_SysV64_RegCall_RegMask :
                         CSR_SysV64_RegCall_NoSSE_RegMask);
      }
    } else {
      return (HasSSE ? CSR_32_RegCall_RegMask :
                       CSR_32_RegCall_NoSSE_RegMask);
    }
  case CallingConv::CFGuard_Check:
    assert(!Is64Bit && "CFGuard check mechanism only used on 32-bit X86");
    return (HasSSE ? CSR_Win32_CFGuard_Check_RegMask
                   : CSR_Win32_CFGuard_Check_NoSSE_RegMask);
  case CallingConv::Cold:
    if (Is64Bit)
      return CSR_64_MostRegs_RegMask;
    break;
  case CallingConv::Win64:
    return CSR_Win64_RegMask;
  case CallingConv::SwiftTail:
    if (!Is64Bit)
      return CSR_32_RegMask;
    return IsWin64 ? CSR_Win64_SwiftTail_RegMask : CSR_64_SwiftTail_RegMask;
  case CallingConv::X86_64_SysV:
    return CSR_64_RegMask;
  case CallingConv::X86_INTR:
    if (Is64Bit) {
      if (HasAVX512)
        return CSR_64_AllRegs_AVX512_RegMask;
      if (HasAVX)
        return CSR_64_AllRegs_AVX_RegMask;
      if (HasSSE)
        return CSR_64_AllRegs_RegMask;
      return CSR_64_AllRegs_NoSSE_RegMask;
    } else {
      if (HasAVX512)
        return CSR_32_AllRegs_AVX512_RegMask;
      if (HasAVX)
        return CSR_32_AllRegs_AVX_RegMask;
      if (HasSSE)
        return CSR_32_AllRegs_SSE_RegMask;
      return CSR_32_AllRegs_RegMask;
    }
  default:
    break;
  }

  // Unlike getCalleeSavedRegs(), we don't have MMI so we can't check
  // callsEHReturn().
  if (Is64Bit) {
    const Function &F = MF.getFunction();
    bool IsSwiftCC = Subtarget.getTargetLowering()->supportSwiftError() &&
                     F.getAttributes().hasAttrSomewhere(Attribute::SwiftError);
    if (IsSwiftCC)
      return IsWin64 ? CSR_Win64_SwiftError_RegMask : CSR_64_SwiftError_RegMask;

    return IsWin64 ? CSR_Win64_RegMask : CSR_64_RegMask;
  }

  return CSR_32_RegMask;
}

const uint32_t*
X86RegisterInfo::getNoPreservedMask() const {
  return CSR_NoRegs_RegMask;
}

const uint32_t *X86RegisterInfo::getDarwinTLSCallPreservedMask() const {
  return CSR_64_TLS_Darwin_RegMask;
}

BitVector X86RegisterInfo::getReservedRegs(const MachineFunction &MF) const {
  BitVector Reserved(getNumRegs());
  const X86FrameLowering *TFI = getFrameLowering(MF);

  // Set the floating point control register as reserved.
  Reserved.set(X86::FPCW);

  // Set the floating point status register as reserved.
  Reserved.set(X86::FPSW);

  // Set the SIMD floating point control register as reserved.
  Reserved.set(X86::MXCSR);

  // Set the stack-pointer register and its aliases as reserved.
  for (const MCPhysReg &SubReg : subregs_inclusive(X86::RSP))
    Reserved.set(SubReg);

  // Set the Shadow Stack Pointer as reserved.
  Reserved.set(X86::SSP);

  // Set the instruction pointer register and its aliases as reserved.
  for (const MCPhysReg &SubReg : subregs_inclusive(X86::RIP))
    Reserved.set(SubReg);

  // Set the frame-pointer register and its aliases as reserved if needed.
  if (TFI->hasFP(MF)) {
    for (const MCPhysReg &SubReg : subregs_inclusive(X86::RBP))
      Reserved.set(SubReg);
  }

  // Set the base-pointer register and its aliases as reserved if needed.
  if (hasBasePointer(MF)) {
    CallingConv::ID CC = MF.getFunction().getCallingConv();
    const uint32_t *RegMask = getCallPreservedMask(MF, CC);
    if (MachineOperand::clobbersPhysReg(RegMask, getBaseRegister()))
      report_fatal_error(
          "Stack realignment in presence of dynamic allocas is not supported "
          "with this calling convention.");

    Register BasePtr = getX86SubSuperRegister(getBaseRegister(), 64);
    for (const MCPhysReg &SubReg : subregs_inclusive(BasePtr))
      Reserved.set(SubReg);
  }

  // Mark the segment registers as reserved.
  Reserved.set(X86::CS);
  Reserved.set(X86::SS);
  Reserved.set(X86::DS);
  Reserved.set(X86::ES);
  Reserved.set(X86::FS);
  Reserved.set(X86::GS);

  // Mark the floating point stack registers as reserved.
  for (unsigned n = 0; n != 8; ++n)
    Reserved.set(X86::ST0 + n);

  // Reserve the registers that only exist in 64-bit mode.
  if (!Is64Bit) {
    // These 8-bit registers are part of the x86-64 extension even though
    // their super-registers are the old 32-bit registers.
    Reserved.set(X86::SIL);
    Reserved.set(X86::DIL);
    Reserved.set(X86::BPL);
    Reserved.set(X86::SPL);
    Reserved.set(X86::SIH);
    Reserved.set(X86::DIH);
    Reserved.set(X86::BPH);
    Reserved.set(X86::SPH);

    for (unsigned n = 0; n != 8; ++n) {
      // R8, R9, ...
      for (MCRegAliasIterator AI(X86::R8 + n, this, true); AI.isValid(); ++AI)
        Reserved.set(*AI);

      // XMM8, XMM9, ...
      for (MCRegAliasIterator AI(X86::XMM8 + n, this, true); AI.isValid(); ++AI)
        Reserved.set(*AI);
    }
  }
  if (!Is64Bit || !MF.getSubtarget<X86Subtarget>().hasAVX512()) {
    for (unsigned n = 0; n != 16; ++n) {
      for (MCRegAliasIterator AI(X86::XMM16 + n, this, true); AI.isValid();
           ++AI)
        Reserved.set(*AI);
    }
  }

  // Reserve the extended general purpose registers.
  if (!Is64Bit || !MF.getSubtarget<X86Subtarget>().hasEGPR())
    Reserved.set(X86::R16, X86::R31WH + 1);

  if (MF.getFunction().getCallingConv() == CallingConv::GRAAL) {
    for (MCRegAliasIterator AI(X86::R14, this, true); AI.isValid(); ++AI)
      Reserved.set(*AI);
    for (MCRegAliasIterator AI(X86::R15, this, true); AI.isValid(); ++AI)
      Reserved.set(*AI);
  }

  assert(checkAllSuperRegsMarked(Reserved,
                                 {X86::SIL, X86::DIL, X86::BPL, X86::SPL,
                                  X86::SIH, X86::DIH, X86::BPH, X86::SPH}));
  return Reserved;
}

unsigned X86RegisterInfo::getNumSupportedRegs(const MachineFunction &MF) const {
  // All existing Intel CPUs that support AMX support AVX512 and all existing
  // Intel CPUs that support APX support AMX. AVX512 implies AVX.
  //
  // We enumerate the registers in X86GenRegisterInfo.inc in this order:
  //
  // Registers before AVX512,
  // AVX512 registers (X/YMM16-31, ZMM0-31, K registers)
  // AMX registers (TMM)
  // APX registers (R16-R31)
  //
  // and try to return the minimum number of registers supported by the target.
  assert((X86::R15WH + 1 == X86::YMM0) && (X86::YMM15 + 1 == X86::K0) &&
         (X86::K6_K7 + 1 == X86::TMMCFG) && (X86::TMM7 + 1 == X86::R16) &&
         (X86::R31WH + 1 == X86::NUM_TARGET_REGS) &&
         "Register number may be incorrect");

  const X86Subtarget &ST = MF.getSubtarget<X86Subtarget>();
  if (ST.hasEGPR())
    return X86::NUM_TARGET_REGS;
  if (ST.hasAMXTILE())
    return X86::TMM7 + 1;
  if (ST.hasAVX512())
    return X86::K6_K7 + 1;
  if (ST.hasAVX())
    return X86::YMM15 + 1;
  return X86::R15WH + 1;
}

bool X86RegisterInfo::isArgumentRegister(const MachineFunction &MF,
                                         MCRegister Reg) const {
  const X86Subtarget &ST = MF.getSubtarget<X86Subtarget>();
  const TargetRegisterInfo &TRI = *ST.getRegisterInfo();
  auto IsSubReg = [&](MCRegister RegA, MCRegister RegB) {
    return TRI.isSuperOrSubRegisterEq(RegA, RegB);
  };

  if (!ST.is64Bit())
    return llvm::any_of(
               SmallVector<MCRegister>{X86::EAX, X86::ECX, X86::EDX},
               [&](MCRegister &RegA) { return IsSubReg(RegA, Reg); }) ||
           (ST.hasMMX() && X86::VR64RegClass.contains(Reg));

  CallingConv::ID CC = MF.getFunction().getCallingConv();

  if (CC == CallingConv::X86_64_SysV && IsSubReg(X86::RAX, Reg))
    return true;

  if (llvm::any_of(
          SmallVector<MCRegister>{X86::RDX, X86::RCX, X86::R8, X86::R9},
          [&](MCRegister &RegA) { return IsSubReg(RegA, Reg); }))
    return true;

  if (CC != CallingConv::Win64 &&
      llvm::any_of(SmallVector<MCRegister>{X86::RDI, X86::RSI},
                   [&](MCRegister &RegA) { return IsSubReg(RegA, Reg); }))
    return true;

  if (ST.hasSSE1() &&
      llvm::any_of(SmallVector<MCRegister>{X86::XMM0, X86::XMM1, X86::XMM2,
                                           X86::XMM3, X86::XMM4, X86::XMM5,
                                           X86::XMM6, X86::XMM7},
                   [&](MCRegister &RegA) { return IsSubReg(RegA, Reg); }))
    return true;

  return X86GenRegisterInfo::isArgumentRegister(MF, Reg);
}

bool X86RegisterInfo::isFixedRegister(const MachineFunction &MF,
                                      MCRegister PhysReg) const {
  const X86Subtarget &ST = MF.getSubtarget<X86Subtarget>();
  const TargetRegisterInfo &TRI = *ST.getRegisterInfo();

  // Stack pointer.
  if (TRI.isSuperOrSubRegisterEq(X86::RSP, PhysReg))
    return true;

  // The frame pointer is fixed whenever it is in use.
  const X86FrameLowering &TFI = *getFrameLowering(MF);
  if (TFI.hasFP(MF) && TRI.isSuperOrSubRegisterEq(X86::RBP, PhysReg))
    return true;

  return X86GenRegisterInfo::isFixedRegister(MF, PhysReg);
}

bool X86RegisterInfo::isTileRegisterClass(const TargetRegisterClass *RC) const {
  return RC->getID() == X86::TILERegClassID;
}

void X86RegisterInfo::adjustStackMapLiveOutMask(uint32_t *Mask) const {
  // Check if the EFLAGS register is marked as live-out. This shouldn't happen,
  // because the calling convention defines the EFLAGS register as NOT
  // preserved.
  //
  // Unfortunately, EFLAGS can show up as live-out after branch folding. Add
  // an assert to track this and clear the register afterwards to avoid
  // unnecessary crashes during release builds.
  assert(!(Mask[X86::EFLAGS / 32] & (1U << (X86::EFLAGS % 32))) &&
         "EFLAGS are not live-out from a patchpoint.");

  // Also clean other registers that don't need preserving (IP).
  for (auto Reg : {X86::EFLAGS, X86::RIP, X86::EIP, X86::IP})
    Mask[Reg / 32] &= ~(1U << (Reg % 32));
}

//===----------------------------------------------------------------------===//
// Stack Frame Processing methods
//===----------------------------------------------------------------------===//

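// The stack pointer cannot be used to address stack slots when the frame
// contains variable-sized objects or opaque SP adjustments (e.g.
// stack-adjusting inline asm), since slot offsets from SP are then unknown.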
static bool CantUseSP(const MachineFrameInfo &MFI) {
  return MFI.hasVarSizedObjects() || MFI.hasOpaqueSPAdjustment();
}

bool X86RegisterInfo::hasBasePointer(const MachineFunction &MF) const {
  const X86MachineFunctionInfo *X86FI = MF.getInfo<X86MachineFunctionInfo>();
  // If we have a virtual register to reference the argument area, we don't
  // need a base pointer.
  if (X86FI->getStackPtrSaveMI() != nullptr)
    return false;

  if (X86FI->hasPreallocatedCall())
    return true;

  const MachineFrameInfo &MFI = MF.getFrameInfo();

  if (!EnableBasePointer)
    return false;

  // When we need stack realignment, we can't address the stack from the frame
  // pointer. When we have dynamic allocas or stack-adjusting inline asm, we
  // can't address variables from the stack pointer. MS inline asm can
  // reference locals while also adjusting the stack pointer. When we can't
  // use both the SP and the FP, we need a separate base pointer register.
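  // For example, a function that over-aligns a local variable (forcing
  // realignment) and also calls alloca() can address neither object from a
  // single register, so it needs the base pointer (RBX/EBX or ESI).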
  bool CantUseFP = hasStackRealignment(MF);
  return CantUseFP && CantUseSP(MFI);
}

bool X86RegisterInfo::canRealignStack(const MachineFunction &MF) const {
  if (!TargetRegisterInfo::canRealignStack(MF))
    return false;

  const MachineFrameInfo &MFI = MF.getFrameInfo();
  const MachineRegisterInfo *MRI = &MF.getRegInfo();

  // Stack realignment requires a frame pointer. If we already started
  // register allocation with frame pointer elimination, it is too late now.
  if (!MRI->canReserveReg(FramePtr))
    return false;

  // If a base pointer is necessary, check that it isn't too late to reserve
  // it.
  if (CantUseSP(MFI))
    return MRI->canReserveReg(BasePtr);
  return true;
}

bool X86RegisterInfo::shouldRealignStack(const MachineFunction &MF) const {
  if (TargetRegisterInfo::shouldRealignStack(MF))
    return true;

  return !Is64Bit && MF.getFunction().getCallingConv() == CallingConv::X86_INTR;
}

// tryOptimizeLEAtoMOV - helper function that tries to replace a LEA instruction
// of the form 'lea (%esp), %ebx' --> 'mov %esp, %ebx'.
// TODO: In this case we should really be trying first to entirely eliminate
// this instruction, which is a plain copy.
static bool tryOptimizeLEAtoMOV(MachineBasicBlock::iterator II) {
  MachineInstr &MI = *II;
  unsigned Opc = II->getOpcode();
  // Check if this is a LEA of the form 'lea (%esp), %ebx'
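  // The memory reference follows the destination operand: base register,
  // scale, index register, displacement, and segment register. Only a bare
  // 'lea (%base), %dst' (scale 1, no index, zero displacement, no segment)
  // is equivalent to a register-to-register MOV.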
  if ((Opc != X86::LEA32r && Opc != X86::LEA64r && Opc != X86::LEA64_32r) ||
      MI.getOperand(2).getImm() != 1 ||
      MI.getOperand(3).getReg() != X86::NoRegister ||
      MI.getOperand(4).getImm() != 0 ||
      MI.getOperand(5).getReg() != X86::NoRegister)
    return false;
  Register BasePtr = MI.getOperand(1).getReg();
  // In X32 mode, ensure the base-pointer is a 32-bit operand, so the LEA will
  // be replaced with a 32-bit operand MOV which will zero extend the upper
  // 32-bits of the super register.
  if (Opc == X86::LEA64_32r)
    BasePtr = getX86SubSuperRegister(BasePtr, 32);
  Register NewDestReg = MI.getOperand(0).getReg();
  const X86InstrInfo *TII =
      MI.getParent()->getParent()->getSubtarget<X86Subtarget>().getInstrInfo();
  TII->copyPhysReg(*MI.getParent(), II, MI.getDebugLoc(), NewDestReg, BasePtr,
                   MI.getOperand(1).isKill());
  MI.eraseFromParent();
  return true;
}

static bool isFuncletReturnInstr(MachineInstr &MI) {
  switch (MI.getOpcode()) {
  case X86::CATCHRET:
  case X86::CLEANUPRET:
    return true;
  default:
    return false;
  }
  llvm_unreachable("impossible");
}

void X86RegisterInfo::eliminateFrameIndex(MachineBasicBlock::iterator II,
                                          unsigned FIOperandNum,
                                          Register BaseReg,
                                          int FIOffset) const {
  MachineInstr &MI = *II;
  unsigned Opc = MI.getOpcode();
  if (Opc == TargetOpcode::LOCAL_ESCAPE) {
    MachineOperand &FI = MI.getOperand(FIOperandNum);
    FI.ChangeToImmediate(FIOffset);
    return;
  }

  MI.getOperand(FIOperandNum).ChangeToRegister(BaseReg, false);

  // The frame index format for stackmaps and patchpoints is different from the
  // X86 format. It only has a FI and an offset.
  if (Opc == TargetOpcode::STACKMAP || Opc == TargetOpcode::PATCHPOINT) {
    assert(BasePtr == FramePtr && "Expected the FP as base register");
    int64_t Offset = MI.getOperand(FIOperandNum + 1).getImm() + FIOffset;
    MI.getOperand(FIOperandNum + 1).ChangeToImmediate(Offset);
    return;
  }

  if (MI.getOperand(FIOperandNum + 3).isImm()) {
    // Offset is a 32-bit integer.
    int Imm = (int)(MI.getOperand(FIOperandNum + 3).getImm());
    int Offset = FIOffset + Imm;
    assert((!Is64Bit || isInt<32>((long long)FIOffset + Imm)) &&
           "Requesting 64-bit offset in 32-bit immediate!");
    if (Offset != 0)
      MI.getOperand(FIOperandNum + 3).ChangeToImmediate(Offset);
  } else {
    // Offset is symbolic. This is extremely rare.
    uint64_t Offset =
        FIOffset + (uint64_t)MI.getOperand(FIOperandNum + 3).getOffset();
    MI.getOperand(FIOperandNum + 3).setOffset(Offset);
  }
}

bool
X86RegisterInfo::eliminateFrameIndex(MachineBasicBlock::iterator II,
                                     int SPAdj, unsigned FIOperandNum,
                                     RegScavenger *RS) const {
  MachineInstr &MI = *II;
  MachineBasicBlock &MBB = *MI.getParent();
  MachineFunction &MF = *MBB.getParent();
  MachineBasicBlock::iterator MBBI = MBB.getFirstTerminator();
  bool IsEHFuncletEpilogue = MBBI == MBB.end() ? false
                                               : isFuncletReturnInstr(*MBBI);
  const X86FrameLowering *TFI = getFrameLowering(MF);
  int FrameIndex = MI.getOperand(FIOperandNum).getIndex();

  // Determine base register and offset.
  int FIOffset;
  Register BasePtr;
  if (MI.isReturn()) {
    assert((!hasStackRealignment(MF) ||
            MF.getFrameInfo().isFixedObjectIndex(FrameIndex)) &&
           "Return instruction can only reference SP relative frame objects");
    FIOffset =
        TFI->getFrameIndexReferenceSP(MF, FrameIndex, BasePtr, 0).getFixed();
  } else if (TFI->Is64Bit && (MBB.isEHFuncletEntry() || IsEHFuncletEpilogue)) {
    FIOffset = TFI->getWin64EHFrameIndexRef(MF, FrameIndex, BasePtr);
  } else {
    FIOffset = TFI->getFrameIndexReference(MF, FrameIndex, BasePtr).getFixed();
  }

  // LOCAL_ESCAPE uses a single offset, with no register. It only works in the
  // simple FP case, and doesn't work with stack realignment. On 32-bit, the
  // offset is from the traditional base pointer location. On 64-bit, the
  // offset is from the SP at the end of the prologue, not the FP location.
  // This matches the behavior of llvm.frameaddress.
  unsigned Opc = MI.getOpcode();
  if (Opc == TargetOpcode::LOCAL_ESCAPE) {
    MachineOperand &FI = MI.getOperand(FIOperandNum);
    FI.ChangeToImmediate(FIOffset);
    return false;
  }

  // For LEA64_32r, when BasePtr is 32 bits (X32) we can use the full-size
  // 64-bit register as the source operand; the semantics are the same and the
  // destination is 32 bits. This saves one byte per LEA since the 0x67 prefix
  // is avoided. Don't change BasePtr since it is used later for stack
  // adjustment.
  Register MachineBasePtr = BasePtr;
  if (Opc == X86::LEA64_32r && X86::GR32RegClass.contains(BasePtr))
    MachineBasePtr = getX86SubSuperRegister(BasePtr, 64);

  // This must be part of a memory reference. Replace the FrameIndex with the
  // base register. Add an offset to the offset.
  MI.getOperand(FIOperandNum).ChangeToRegister(MachineBasePtr, false);

  if (BasePtr == StackPtr)
    FIOffset += SPAdj;

  // The frame index format for stackmaps and patchpoints is different from the
  // X86 format. It only has a FI and an offset.
  if (Opc == TargetOpcode::STACKMAP || Opc == TargetOpcode::PATCHPOINT) {
    assert(BasePtr == FramePtr && "Expected the FP as base register");
    int64_t Offset = MI.getOperand(FIOperandNum + 1).getImm() + FIOffset;
    MI.getOperand(FIOperandNum + 1).ChangeToImmediate(Offset);
    return false;
  }

  if (MI.getOperand(FIOperandNum + 3).isImm()) {
    // Offset is a 32-bit integer.
    int Imm = (int)(MI.getOperand(FIOperandNum + 3).getImm());
    int Offset = FIOffset + Imm;
    assert((!Is64Bit || isInt<32>((long long)FIOffset + Imm)) &&
           "Requesting 64-bit offset in 32-bit immediate!");
    if (Offset != 0 || !tryOptimizeLEAtoMOV(II))
      MI.getOperand(FIOperandNum + 3).ChangeToImmediate(Offset);
  } else {
    // Offset is symbolic. This is extremely rare.
    uint64_t Offset = FIOffset +
                      (uint64_t)MI.getOperand(FIOperandNum + 3).getOffset();
    MI.getOperand(FIOperandNum + 3).setOffset(Offset);
  }
  return false;
}

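// Scan the terminator at MBBI for a caller-saved GPR that the return or tail
// call does not read, so it can serve as a scratch register for stack
// adjustments in the epilogue. Returns 0 if no such register is available.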
unsigned X86RegisterInfo::findDeadCallerSavedReg(
    MachineBasicBlock &MBB, MachineBasicBlock::iterator &MBBI) const {
  const MachineFunction *MF = MBB.getParent();
  if (MF->callsEHReturn())
    return 0;

  const TargetRegisterClass &AvailableRegs = *getGPRsForTailCall(*MF);

  if (MBBI == MBB.end())
    return 0;

  switch (MBBI->getOpcode()) {
  default:
    return 0;
  case TargetOpcode::PATCHABLE_RET:
  case X86::RET:
  case X86::RET32:
  case X86::RET64:
  case X86::RETI32:
  case X86::RETI64:
  case X86::TCRETURNdi:
  case X86::TCRETURNri:
  case X86::TCRETURNmi:
  case X86::TCRETURNdi64:
  case X86::TCRETURNri64:
  case X86::TCRETURNmi64:
  case X86::EH_RETURN:
  case X86::EH_RETURN64: {
    SmallSet<uint16_t, 8> Uses;
    for (MachineOperand &MO : MBBI->operands()) {
      if (!MO.isReg() || MO.isDef())
        continue;
      Register Reg = MO.getReg();
      if (!Reg)
        continue;
      for (MCRegAliasIterator AI(Reg, this, true); AI.isValid(); ++AI)
        Uses.insert(*AI);
    }

    for (auto CS : AvailableRegs)
      if (!Uses.count(CS) && CS != X86::RIP && CS != X86::RSP &&
          CS != X86::ESP)
        return CS;
  }
  }

  return 0;
}

Register X86RegisterInfo::getFrameRegister(const MachineFunction &MF) const {
  const X86FrameLowering *TFI = getFrameLowering(MF);
  return TFI->hasFP(MF) ? FramePtr : StackPtr;
}

unsigned
X86RegisterInfo::getPtrSizedFrameRegister(const MachineFunction &MF) const {
  const X86Subtarget &Subtarget = MF.getSubtarget<X86Subtarget>();
  Register FrameReg = getFrameRegister(MF);
  if (Subtarget.isTarget64BitILP32())
    FrameReg = getX86SubSuperRegister(FrameReg, 32);
  return FrameReg;
}

unsigned
X86RegisterInfo::getPtrSizedStackRegister(const MachineFunction &MF) const {
  const X86Subtarget &Subtarget = MF.getSubtarget<X86Subtarget>();
  Register StackReg = getStackRegister();
  if (Subtarget.isTarget64BitILP32())
    StackReg = getX86SubSuperRegister(StackReg, 32);
  return StackReg;
}

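// Walk the def chain of VirtReg until an instruction that defines a tile
// shape is found, caching each result in the VirtRegMap so repeated queries
// stay cheap.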
static ShapeT getTileShape(Register VirtReg, VirtRegMap *VRM,
                           const MachineRegisterInfo *MRI) {
  if (VRM->hasShape(VirtReg))
    return VRM->getShape(VirtReg);

  const MachineOperand &Def = *MRI->def_begin(VirtReg);
  MachineInstr *MI = const_cast<MachineInstr *>(Def.getParent());
  unsigned OpCode = MI->getOpcode();
  switch (OpCode) {
  default:
    llvm_unreachable("Unexpected machine instruction on tile register!");
    break;
  case X86::COPY: {
    Register SrcReg = MI->getOperand(1).getReg();
    ShapeT Shape = getTileShape(SrcReg, VRM, MRI);
    VRM->assignVirt2Shape(VirtReg, Shape);
    return Shape;
  }
  // We only collect the tile shape that is defined.
  case X86::PTILELOADDV:
  case X86::PTILELOADDT1V:
  case X86::PTDPBSSDV:
  case X86::PTDPBSUDV:
  case X86::PTDPBUSDV:
  case X86::PTDPBUUDV:
  case X86::PTILEZEROV:
  case X86::PTDPBF16PSV:
  case X86::PTDPFP16PSV:
  case X86::PTCMMIMFP16PSV:
  case X86::PTCMMRLFP16PSV:
    MachineOperand &MO1 = MI->getOperand(1);
    MachineOperand &MO2 = MI->getOperand(2);
    ShapeT Shape(&MO1, &MO2, MRI);
    VRM->assignVirt2Shape(VirtReg, Shape);
    return Shape;
  }
}

bool X86RegisterInfo::getRegAllocationHints(Register VirtReg,
                                            ArrayRef<MCPhysReg> Order,
                                            SmallVectorImpl<MCPhysReg> &Hints,
                                            const MachineFunction &MF,
                                            const VirtRegMap *VRM,
                                            const LiveRegMatrix *Matrix) const {
  const MachineRegisterInfo *MRI = &MF.getRegInfo();
  const TargetRegisterClass &RC = *MRI->getRegClass(VirtReg);
  bool BaseImplRetVal = TargetRegisterInfo::getRegAllocationHints(
      VirtReg, Order, Hints, MF, VRM, Matrix);

  unsigned ID = RC.getID();
  if (ID != X86::TILERegClassID)
    return BaseImplRetVal;

  ShapeT VirtShape = getTileShape(VirtReg, const_cast<VirtRegMap *>(VRM), MRI);
  auto AddHint = [&](MCPhysReg PhysReg) {
    Register VReg = Matrix->getOneVReg(PhysReg);
    if (VReg == MCRegister::NoRegister) { // Not allocated yet
      Hints.push_back(PhysReg);
      return;
    }
    ShapeT PhysShape = getTileShape(VReg, const_cast<VirtRegMap *>(VRM), MRI);
    if (PhysShape == VirtShape)
      Hints.push_back(PhysReg);
  };

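  // Prefer physical tile registers that arrived as copy hints, then fall back
  // to the normal allocation order; in both cases AddHint only records
  // registers whose currently assigned shape matches VirtShape.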
  SmallSet<MCPhysReg, 4> CopyHints;
  CopyHints.insert(Hints.begin(), Hints.end());
  Hints.clear();
  for (auto Hint : CopyHints) {
    if (RC.contains(Hint) && !MRI->isReserved(Hint))
      AddHint(Hint);
  }
  for (MCPhysReg PhysReg : Order) {
    if (!CopyHints.count(PhysReg) && RC.contains(PhysReg) &&
        !MRI->isReserved(PhysReg))
      AddHint(PhysReg);
  }

#define DEBUG_TYPE "tile-hint"
  LLVM_DEBUG({
    dbgs() << "Hints for virtual register " << format_hex(VirtReg, 8) << "\n";
    for (auto Hint : Hints) {
      dbgs() << "tmm" << Hint << ",";
    }
    dbgs() << "\n";
  });
#undef DEBUG_TYPE

  return true;
}