//==- TargetRegisterInfo.cpp - Target Register Information Implementation --==//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements the TargetRegisterInfo interface.
//
//===----------------------------------------------------------------------===//

#include "llvm/CodeGen/TargetRegisterInfo.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/BitVector.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/BinaryFormat/Dwarf.h"
#include "llvm/CodeGen/LiveInterval.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/TargetFrameLowering.h"
#include "llvm/CodeGen/TargetInstrInfo.h"
#include "llvm/CodeGen/TargetSubtargetInfo.h"
#include "llvm/CodeGen/VirtRegMap.h"
#include "llvm/Config/llvm-config.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/DebugInfoMetadata.h"
#include "llvm/IR/Function.h"
#include "llvm/MC/MCRegisterInfo.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/MachineValueType.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/Printable.h"
#include "llvm/Support/raw_ostream.h"
#include <cassert>
#include <limits> // std::numeric_limits, used in getCoveringSubRegIndexes.
#include <utility>

#define DEBUG_TYPE "target-reg-info"

using namespace llvm;

static cl::opt<unsigned>
    HugeSizeForSplit("huge-size-for-split", cl::Hidden,
                     cl::desc("A threshold of live range size which may cause "
                              "high compile time cost in global splitting."),
                     cl::init(5000));

TargetRegisterInfo::TargetRegisterInfo(const TargetRegisterInfoDesc *ID,
                             regclass_iterator RCB, regclass_iterator RCE,
                             const char *const *SRINames,
                             const LaneBitmask *SRILaneMasks,
                             LaneBitmask SRICoveringLanes,
                             const RegClassInfo *const RCIs,
                             unsigned Mode)
  : InfoDesc(ID), SubRegIndexNames(SRINames),
    SubRegIndexLaneMasks(SRILaneMasks),
    RegClassBegin(RCB), RegClassEnd(RCE),
    CoveringLanes(SRICoveringLanes),
    RCInfos(RCIs), HwMode(Mode) {
}

TargetRegisterInfo::~TargetRegisterInfo() = default;

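// By default, avoid region splitting for a huge live range whose single
// definition is trivially rematerializable: respilling such a range is cheap,
// while globally splitting it can be very expensive at compile time.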
bool TargetRegisterInfo::shouldRegionSplitForVirtReg(
    const MachineFunction &MF, const LiveInterval &VirtReg) const {
  const TargetInstrInfo *TII = MF.getSubtarget().getInstrInfo();
  const MachineRegisterInfo &MRI = MF.getRegInfo();
  MachineInstr *MI = MRI.getUniqueVRegDef(VirtReg.reg());
  if (MI && TII->isTriviallyReMaterializable(*MI) &&
      VirtReg.size() > HugeSizeForSplit)
    return false;
  return true;
}

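// Set the bits for Reg and all of its super-registers in RegisterSet. The
// iterator is constructed with IncludeSelf=true, so Reg itself is marked too.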
void TargetRegisterInfo::markSuperRegs(BitVector &RegisterSet,
                                       MCRegister Reg) const {
  for (MCSuperRegIterator AI(Reg, this, true); AI.isValid(); ++AI)
    RegisterSet.set(*AI);
}

bool TargetRegisterInfo::checkAllSuperRegsMarked(const BitVector &RegisterSet,
    ArrayRef<MCPhysReg> Exceptions) const {
  // Check that all super registers of reserved regs are reserved as well.
  BitVector Checked(getNumRegs());
  for (unsigned Reg : RegisterSet.set_bits()) {
    if (Checked[Reg])
      continue;
    for (MCSuperRegIterator SR(Reg, this); SR.isValid(); ++SR) {
      if (!RegisterSet[*SR] && !is_contained(Exceptions, Reg)) {
        dbgs() << "Error: Super register " << printReg(*SR, this)
               << " of reserved register " << printReg(Reg, this)
               << " is not reserved.\n";
        return false;
      }

      // We check super-registers transitively, so we can remember this for
      // later to avoid compile-time explosion in deep register hierarchies.
      Checked.set(*SR);
    }
  }
  return true;
}

namespace llvm {

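// Render a register in MIR-style syntax: $noreg for the null register,
// SS#<n> for stack slots, %<name-or-index> for virtual registers, and a
// lower-case $<name> for physical registers when TRI is available.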
Printable printReg(Register Reg, const TargetRegisterInfo *TRI,
                   unsigned SubIdx, const MachineRegisterInfo *MRI) {
  return Printable([Reg, TRI, SubIdx, MRI](raw_ostream &OS) {
    if (!Reg)
      OS << "$noreg";
    else if (Register::isStackSlot(Reg))
      OS << "SS#" << Register::stackSlot2Index(Reg);
    else if (Reg.isVirtual()) {
      StringRef Name = MRI ? MRI->getVRegName(Reg) : "";
      if (Name != "") {
        OS << '%' << Name;
      } else {
        OS << '%' << Register::virtReg2Index(Reg);
      }
    } else if (!TRI)
      OS << '$' << "physreg" << Reg;
    else if (Reg < TRI->getNumRegs()) {
      OS << '$';
      printLowerCase(TRI->getName(Reg), OS);
    } else
      llvm_unreachable("Register kind is unsupported.");

    if (SubIdx) {
      if (TRI)
        OS << ':' << TRI->getSubRegIndexName(SubIdx);
      else
        OS << ":sub(" << SubIdx << ')';
    }
  });
}

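// A register unit prints as the '~'-joined list of its root registers, e.g. a
// unit shared by two roots prints as "Root1~Root2".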
Printable printRegUnit(unsigned Unit, const TargetRegisterInfo *TRI) {
  return Printable([Unit, TRI](raw_ostream &OS) {
    // Generic printout when TRI is missing.
    if (!TRI) {
      OS << "Unit~" << Unit;
      return;
    }

    // Check for invalid register units.
    if (Unit >= TRI->getNumRegUnits()) {
      OS << "BadUnit~" << Unit;
      return;
    }

    // Normal units have at least one root.
    MCRegUnitRootIterator Roots(Unit, TRI);
    assert(Roots.isValid() && "Unit has no roots.");
    OS << TRI->getName(*Roots);
    for (++Roots; Roots.isValid(); ++Roots)
      OS << '~' << TRI->getName(*Roots);
  });
}

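// Print either a virtual register (by index) or a register unit.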
Printable printVRegOrUnit(unsigned Unit, const TargetRegisterInfo *TRI) {
  return Printable([Unit, TRI](raw_ostream &OS) {
    if (Register::isVirtualRegister(Unit)) {
      OS << '%' << Register::virtReg2Index(Unit);
    } else {
      OS << printRegUnit(Unit, TRI);
    }
  });
}

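// Print the register class or register bank assigned to a virtual register,
// or '_' if the register is generic and has neither.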
Printable printRegClassOrBank(Register Reg, const MachineRegisterInfo &RegInfo,
                              const TargetRegisterInfo *TRI) {
  return Printable([Reg, &RegInfo, TRI](raw_ostream &OS) {
    if (RegInfo.getRegClassOrNull(Reg))
      OS << StringRef(TRI->getRegClassName(RegInfo.getRegClass(Reg))).lower();
    else if (RegInfo.getRegBankOrNull(Reg))
      OS << StringRef(RegInfo.getRegBankOrNull(Reg)->getName()).lower();
    else {
      OS << "_";
      assert((RegInfo.def_empty(Reg) || RegInfo.getType(Reg).isValid()) &&
             "Generic registers must have a valid type");
    }
  });
}

} // end namespace llvm

/// getAllocatableClass - Return the maximal subclass of the given register
/// class that is allocatable, or NULL.
const TargetRegisterClass *
TargetRegisterInfo::getAllocatableClass(const TargetRegisterClass *RC) const {
  if (!RC || RC->isAllocatable())
    return RC;

  for (BitMaskClassIterator It(RC->getSubClassMask(), *this); It.isValid();
       ++It) {
    const TargetRegisterClass *SubRC = getRegClass(It.getID());
    if (SubRC->isAllocatable())
      return SubRC;
  }
  return nullptr;
}

/// getMinimalPhysRegClass - Returns the register class of a physical
/// register of the given type, picking the most specialized (smallest)
/// register class of the right type that contains this physreg.
const TargetRegisterClass *
TargetRegisterInfo::getMinimalPhysRegClass(MCRegister reg, MVT VT) const {
  assert(Register::isPhysicalRegister(reg) &&
         "reg must be a physical register");

  // Pick the most specialized register class of the right type that contains
  // this physreg.
  const TargetRegisterClass* BestRC = nullptr;
  for (const TargetRegisterClass* RC : regclasses()) {
    if ((VT == MVT::Other || isTypeLegalForClass(*RC, VT)) &&
        RC->contains(reg) && (!BestRC || BestRC->hasSubClass(RC)))
      BestRC = RC;
  }

  assert(BestRC && "Couldn't find the register class");
  return BestRC;
}

const TargetRegisterClass *
TargetRegisterInfo::getMinimalPhysRegClassLLT(MCRegister reg, LLT Ty) const {
  assert(Register::isPhysicalRegister(reg) &&
         "reg must be a physical register");

  // Pick the most specialized register class of the right type that contains
  // this physreg.
  const TargetRegisterClass *BestRC = nullptr;
  for (const TargetRegisterClass *RC : regclasses()) {
    if ((!Ty.isValid() || isTypeLegalForClass(*RC, Ty)) && RC->contains(reg) &&
        (!BestRC || BestRC->hasSubClass(RC)))
      BestRC = RC;
  }

  return BestRC;
}

/// getAllocatableSetForRC - Set the bits that represent allocatable
/// registers for the specific register class.
static void getAllocatableSetForRC(const MachineFunction &MF,
                                   const TargetRegisterClass *RC, BitVector &R){
  assert(RC->isAllocatable() && "invalid for nonallocatable sets");
  ArrayRef<MCPhysReg> Order = RC->getRawAllocationOrder(MF);
  for (MCPhysReg PR : Order)
    R.set(PR);
}

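// Compute the set of allocatable registers for RC (or for all register
// classes when RC is null), then clear the bits of any reserved registers.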
BitVector TargetRegisterInfo::getAllocatableSet(const MachineFunction &MF,
                                          const TargetRegisterClass *RC) const {
  BitVector Allocatable(getNumRegs());
  if (RC) {
    // A register class with no allocatable subclass returns an empty set.
    const TargetRegisterClass *SubClass = getAllocatableClass(RC);
    if (SubClass)
      getAllocatableSetForRC(MF, SubClass, Allocatable);
  } else {
    for (const TargetRegisterClass *C : regclasses())
      if (C->isAllocatable())
        getAllocatableSetForRC(MF, C, Allocatable);
  }

  // Mask out the reserved registers.
  const MachineRegisterInfo &MRI = MF.getRegInfo();
  const BitVector &Reserved = MRI.getReservedRegs();
  Allocatable.reset(Reserved);

  return Allocatable;
}

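// Scan the packed 32-bit words of two register class bit masks and return the
// class with the lowest ID present in both, or null if they are disjoint.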
static inline
const TargetRegisterClass *firstCommonClass(const uint32_t *A,
                                            const uint32_t *B,
                                            const TargetRegisterInfo *TRI) {
  for (unsigned I = 0, E = TRI->getNumRegClasses(); I < E; I += 32)
    if (unsigned Common = *A++ & *B++)
      return TRI->getRegClass(I + countTrailingZeros(Common));
  return nullptr;
}

const TargetRegisterClass *
TargetRegisterInfo::getCommonSubClass(const TargetRegisterClass *A,
                                      const TargetRegisterClass *B) const {
  // First take care of the trivial cases.
  if (A == B)
    return A;
  if (!A || !B)
    return nullptr;

  // Register classes are ordered topologically, so the largest common
  // sub-class is the common sub-class with the smallest ID.
  return firstCommonClass(A->getSubClassMask(), B->getSubClassMask(), this);
}

const TargetRegisterClass *
TargetRegisterInfo::getMatchingSuperRegClass(const TargetRegisterClass *A,
                                             const TargetRegisterClass *B,
                                             unsigned Idx) const {
  assert(A && B && "Missing register class");
  assert(Idx && "Bad sub-register index");

  // Find Idx in the list of super-register indices.
  for (SuperRegClassIterator RCI(B, this); RCI.isValid(); ++RCI)
    if (RCI.getSubReg() == Idx)
      // The bit mask contains all register classes that are projected into B
      // by Idx. Find a class that is also a sub-class of A.
      return firstCommonClass(RCI.getMask(), A->getSubClassMask(), this);
  return nullptr;
}

const TargetRegisterClass *TargetRegisterInfo::
getCommonSuperRegClass(const TargetRegisterClass *RCA, unsigned SubA,
                       const TargetRegisterClass *RCB, unsigned SubB,
                       unsigned &PreA, unsigned &PreB) const {
  assert(RCA && SubA && RCB && SubB && "Invalid arguments");

  // Search all pairs of sub-register indices that project into RCA and RCB
  // respectively. This is quadratic, but usually the sets are very small. On
  // most targets like X86, there will only be a single sub-register index
  // (e.g., sub_16bit projecting into GR16).
  //
  // The worst case is a register class like DPR on ARM.
  // We have indices dsub_0..dsub_7 projecting into that class.
  //
  // It is very common that one register class is a sub-register of the other.
  // Arrange for RCA to be the larger register class so the answer will be
  // found in the first iteration. This makes the search linear for the most
  // common case.
  const TargetRegisterClass *BestRC = nullptr;
  unsigned *BestPreA = &PreA;
  unsigned *BestPreB = &PreB;
  if (getRegSizeInBits(*RCA) < getRegSizeInBits(*RCB)) {
    std::swap(RCA, RCB);
    std::swap(SubA, SubB);
    std::swap(BestPreA, BestPreB);
  }

  // Also terminate the search once we have found a register class as small as
  // RCA.
  unsigned MinSize = getRegSizeInBits(*RCA);

  for (SuperRegClassIterator IA(RCA, this, true); IA.isValid(); ++IA) {
    unsigned FinalA = composeSubRegIndices(IA.getSubReg(), SubA);
    for (SuperRegClassIterator IB(RCB, this, true); IB.isValid(); ++IB) {
      // Check if a common super-register class exists for this index pair.
      const TargetRegisterClass *RC =
        firstCommonClass(IA.getMask(), IB.getMask(), this);
      if (!RC || getRegSizeInBits(*RC) < MinSize)
        continue;

      // The indexes must compose identically: PreA+SubA == PreB+SubB.
      unsigned FinalB = composeSubRegIndices(IB.getSubReg(), SubB);
      if (FinalA != FinalB)
        continue;

      // Is RC a better candidate than BestRC?
      if (BestRC && getRegSizeInBits(*RC) >= getRegSizeInBits(*BestRC))
        continue;

      // Yes, RC is the smallest super-register class seen so far.
      BestRC = RC;
      *BestPreA = IA.getSubReg();
      *BestPreB = IB.getSubReg();

      // Bail early if we reached MinSize. We won't find a better candidate.
      if (getRegSizeInBits(*BestRC) == MinSize)
        return BestRC;
    }
  }
  return BestRC;
}

/// Check if the registers defined by the pair (RegisterClass, SubReg)
/// share the same register file.
static bool shareSameRegisterFile(const TargetRegisterInfo &TRI,
                                  const TargetRegisterClass *DefRC,
                                  unsigned DefSubReg,
                                  const TargetRegisterClass *SrcRC,
                                  unsigned SrcSubReg) {
  // Same register class.
  if (DefRC == SrcRC)
    return true;

  // Both operands are sub registers. Check if they share a register class.
  unsigned SrcIdx, DefIdx;
  if (SrcSubReg && DefSubReg) {
    return TRI.getCommonSuperRegClass(SrcRC, SrcSubReg, DefRC, DefSubReg,
                                      SrcIdx, DefIdx) != nullptr;
  }

  // At most one of the registers is a sub-register; make it Src to avoid
  // duplicating the test.
  if (!SrcSubReg) {
    std::swap(DefSubReg, SrcSubReg);
    std::swap(DefRC, SrcRC);
  }

  // One of the registers is a sub-register; check if we can get a superclass.
  if (SrcSubReg)
    return TRI.getMatchingSuperRegClass(SrcRC, DefRC, SrcSubReg) != nullptr;

  // Plain copy.
  return TRI.getCommonSubClass(DefRC, SrcRC) != nullptr;
}

bool TargetRegisterInfo::shouldRewriteCopySrc(const TargetRegisterClass *DefRC,
                                              unsigned DefSubReg,
                                              const TargetRegisterClass *SrcRC,
                                              unsigned SrcSubReg) const {
  // If this source does not incur a cross register bank copy, use it.
  return shareSameRegisterFile(*this, DefRC, DefSubReg, SrcRC, SrcSubReg);
}

// Compute target-independent register allocator hints to help eliminate
// copies.
bool TargetRegisterInfo::getRegAllocationHints(
    Register VirtReg, ArrayRef<MCPhysReg> Order,
    SmallVectorImpl<MCPhysReg> &Hints, const MachineFunction &MF,
    const VirtRegMap *VRM, const LiveRegMatrix *Matrix) const {
  const MachineRegisterInfo &MRI = MF.getRegInfo();
  const std::pair<Register, SmallVector<Register, 4>> &Hints_MRI =
      MRI.getRegAllocationHints(VirtReg);

  SmallSet<Register, 32> HintedRegs;
  // First hint may be a target hint.
  bool Skip = (Hints_MRI.first != 0);
  for (auto Reg : Hints_MRI.second) {
    if (Skip) {
      Skip = false;
      continue;
    }

    // Target-independent hints are either a physical or a virtual register.
    Register Phys = Reg;
    if (VRM && Phys.isVirtual())
      Phys = VRM->getPhys(Phys);

    // Don't add the same reg twice (Hints_MRI may contain multiple virtual
    // registers allocated to the same physreg).
    if (!HintedRegs.insert(Phys).second)
      continue;
    // Check that Phys is a valid hint in VirtReg's register class.
    if (!Phys.isPhysical())
      continue;
    if (MRI.isReserved(Phys))
      continue;
    // Check that Phys is in the allocation order. We shouldn't heed hints
    // from VirtReg's register class if they aren't in the allocation order.
    // The target probably has a reason for removing the register.
    if (!is_contained(Order, Phys))
      continue;

    // All clear, tell the register allocator to prefer this register.
    Hints.push_back(Phys);
  }
  return false;
}

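// A physical register is callee-saved if the caller-preserved mask for this
// function's calling convention has its bit set.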
bool TargetRegisterInfo::isCalleeSavedPhysReg(
    MCRegister PhysReg, const MachineFunction &MF) const {
  if (PhysReg == 0)
    return false;
  const uint32_t *callerPreservedRegs =
      getCallPreservedMask(MF, MF.getFunction().getCallingConv());
  if (callerPreservedRegs) {
    assert(Register::isPhysicalRegister(PhysReg) &&
           "Expected physical register");
    return (callerPreservedRegs[PhysReg / 32] >> PhysReg % 32) & 1;
  }
  return false;
}

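// Stack realignment is possible unless the function explicitly opts out via
// the "no-realign-stack" attribute.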
bool TargetRegisterInfo::canRealignStack(const MachineFunction &MF) const {
  return !MF.getFunction().hasFnAttribute("no-realign-stack");
}

bool TargetRegisterInfo::shouldRealignStack(const MachineFunction &MF) const {
  const MachineFrameInfo &MFI = MF.getFrameInfo();
  const TargetFrameLowering *TFI = MF.getSubtarget().getFrameLowering();
  const Function &F = MF.getFunction();
  return F.hasFnAttribute("stackrealign") ||
         (MFI.getMaxAlign() > TFI->getStackAlign()) ||
         F.hasFnAttribute(Attribute::StackAlignment);
}

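// Return true if every register preserved by mask0 (a set bit means the
// register is preserved) is also preserved by mask1.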
bool TargetRegisterInfo::regmaskSubsetEqual(const uint32_t *mask0,
                                            const uint32_t *mask1) const {
  unsigned N = (getNumRegs() + 31) / 32;
  for (unsigned I = 0; I < N; ++I)
    if ((mask0[I] & mask1[I]) != mask0[I])
      return false;
  return true;
}

unsigned
TargetRegisterInfo::getRegSizeInBits(Register Reg,
                                     const MachineRegisterInfo &MRI) const {
  const TargetRegisterClass *RC{};
  if (Reg.isPhysical()) {
    // The size is not directly available for physical registers.
    // Instead, we need to access a register class that contains Reg and
    // get the size of that register class.
    RC = getMinimalPhysRegClass(Reg);
  } else {
    LLT Ty = MRI.getType(Reg);
    unsigned RegSize = Ty.isValid() ? Ty.getSizeInBits() : 0;
    // If Reg is a generic register, its LLT carries the size directly.
    if (RegSize)
      return RegSize;
    // Otherwise Reg is not a generic register, so it must have a register
    // class; query the class for its size.
    RC = MRI.getRegClass(Reg);
  }
  assert(RC && "Unable to deduce the register class");
  return getRegSizeInBits(*RC);
}

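// Greedily pick sub-register indexes whose lane masks cover LaneMask without
// touching any lane outside it. Returns false if no such cover exists.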
bool TargetRegisterInfo::getCoveringSubRegIndexes(
    const MachineRegisterInfo &MRI, const TargetRegisterClass *RC,
    LaneBitmask LaneMask, SmallVectorImpl<unsigned> &NeededIndexes) const {
  SmallVector<unsigned, 8> PossibleIndexes;
  unsigned BestIdx = 0;
  unsigned BestCover = 0;

  for (unsigned Idx = 1, E = getNumSubRegIndices(); Idx < E; ++Idx) {
    // Is this index even compatible with the given class?
    if (getSubClassWithSubReg(RC, Idx) != RC)
      continue;
    LaneBitmask SubRegMask = getSubRegIndexLaneMask(Idx);
    // Early exit if we found a perfect match.
    if (SubRegMask == LaneMask) {
      BestIdx = Idx;
      break;
    }

    // The index must not cover any lanes outside \p LaneMask.
    if ((SubRegMask & ~LaneMask).any())
      continue;

    unsigned PopCount = SubRegMask.getNumLanes();
    PossibleIndexes.push_back(Idx);
    if (PopCount > BestCover) {
      BestCover = PopCount;
      BestIdx = Idx;
    }
  }

  // Abort if we cannot possibly implement the COPY with the given indexes.
  if (BestIdx == 0)
    return false;

  NeededIndexes.push_back(BestIdx);

  // Greedy heuristic: keep iterating, picking the best covering subreg index
  // each time.
  LaneBitmask LanesLeft = LaneMask & ~getSubRegIndexLaneMask(BestIdx);
  while (LanesLeft.any()) {
    unsigned BestIdx = 0;
    int BestCover = std::numeric_limits<int>::min();
    for (unsigned Idx : PossibleIndexes) {
      LaneBitmask SubRegMask = getSubRegIndexLaneMask(Idx);
      // Early exit if we found a perfect match.
      if (SubRegMask == LanesLeft) {
        BestIdx = Idx;
        break;
      }

      // Do not cover already-covered lanes to avoid creating cycles in copy
      // bundles (i.e., a bundle containing multiple copies that write to the
      // same registers).
      if ((SubRegMask & ~LanesLeft).any())
        continue;

      // Try to cover as many of the remaining lanes as possible.
      const int Cover = (SubRegMask & LanesLeft).getNumLanes();
      if (Cover > BestCover) {
        BestCover = Cover;
        BestIdx = Idx;
      }
    }

    if (BestIdx == 0)
      return false; // Impossible to handle

    NeededIndexes.push_back(BestIdx);

    LanesLeft &= ~getSubRegIndexLaneMask(BestIdx);
  }

  // BestIdx is guaranteed nonzero here, so the cover succeeded.
  return true;
}

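// Walk backwards through COPY / SUBREG_TO_REG definitions until reaching a
// non-copy-like instruction or a physical register source.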
Register
TargetRegisterInfo::lookThruCopyLike(Register SrcReg,
                                     const MachineRegisterInfo *MRI) const {
  while (true) {
    const MachineInstr *MI = MRI->getVRegDef(SrcReg);
    if (!MI->isCopyLike())
      return SrcReg;

    Register CopySrcReg;
    if (MI->isCopy())
      CopySrcReg = MI->getOperand(1).getReg();
    else {
      assert(MI->isSubregToReg() && "Bad opcode for lookThruCopyLike");
      CopySrcReg = MI->getOperand(2).getReg();
    }

    if (!CopySrcReg.isVirtual())
      return CopySrcReg;

    SrcReg = CopySrcReg;
  }
}

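// Same walk as lookThruCopyLike, but additionally require every register in
// the chain (including the final source) to have a single non-debug use;
// returns an invalid Register otherwise.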
Register TargetRegisterInfo::lookThruSingleUseCopyChain(
    Register SrcReg, const MachineRegisterInfo *MRI) const {
  while (true) {
    const MachineInstr *MI = MRI->getVRegDef(SrcReg);
    // Found the real definition, return it if it has a single use.
    if (!MI->isCopyLike())
      return MRI->hasOneNonDBGUse(SrcReg) ? SrcReg : Register();

    Register CopySrcReg;
    if (MI->isCopy())
      CopySrcReg = MI->getOperand(1).getReg();
    else {
      assert(MI->isSubregToReg() && "Bad opcode for lookThruCopyLike");
      CopySrcReg = MI->getOperand(2).getReg();
    }

    // Continue only if the next definition in the chain is for a virtual
    // register that has a single use.
    if (!CopySrcReg.isVirtual() || !MRI->hasOneNonDBGUse(CopySrcReg))
      return Register();

    SrcReg = CopySrcReg;
  }
}

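// By default, express a fixed offset with DIExpression::appendOffset; targets
// that use scalable stack offsets must override this hook.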
void TargetRegisterInfo::getOffsetOpcodes(
    const StackOffset &Offset, SmallVectorImpl<uint64_t> &Ops) const {
  assert(!Offset.getScalable() && "Scalable offsets are not handled");
  DIExpression::appendOffset(Ops, Offset.getFixed());
}

DIExpression *
TargetRegisterInfo::prependOffsetExpression(const DIExpression *Expr,
                                            unsigned PrependFlags,
                                            const StackOffset &Offset) const {
  assert((PrependFlags &
          ~(DIExpression::DerefBefore | DIExpression::DerefAfter |
            DIExpression::StackValue | DIExpression::EntryValue)) == 0 &&
         "Unsupported prepend flag");
  SmallVector<uint64_t, 16> OffsetExpr;
  if (PrependFlags & DIExpression::DerefBefore)
    OffsetExpr.push_back(dwarf::DW_OP_deref);
  getOffsetOpcodes(Offset, OffsetExpr);
  if (PrependFlags & DIExpression::DerefAfter)
    OffsetExpr.push_back(dwarf::DW_OP_deref);
  return DIExpression::prependOpcodes(Expr, OffsetExpr,
                                      PrependFlags & DIExpression::StackValue,
                                      PrependFlags & DIExpression::EntryValue);
}

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
LLVM_DUMP_METHOD
void TargetRegisterInfo::dumpReg(Register Reg, unsigned SubRegIndex,
                                 const TargetRegisterInfo *TRI) {
  dbgs() << printReg(Reg, TRI, SubRegIndex) << "\n";
}
#endif