//==- TargetRegisterInfo.cpp - Target Register Information Implementation --==//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements the TargetRegisterInfo interface.
//
//===----------------------------------------------------------------------===//

#include "llvm/CodeGen/TargetRegisterInfo.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/BitVector.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/CodeGen/LiveInterval.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/TargetFrameLowering.h"
#include "llvm/CodeGen/TargetInstrInfo.h"
#include "llvm/CodeGen/TargetSubtargetInfo.h"
#include "llvm/CodeGen/VirtRegMap.h"
#include "llvm/Config/llvm-config.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/DebugInfoMetadata.h"
#include "llvm/IR/Function.h"
#include "llvm/MC/MCRegisterInfo.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/MachineValueType.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/Printable.h"
#include "llvm/Support/raw_ostream.h"
#include <cassert>
#include <limits>
#include <utility>

#define DEBUG_TYPE "target-reg-info"

using namespace llvm;

static cl::opt<unsigned>
    HugeSizeForSplit("huge-size-for-split", cl::Hidden,
                     cl::desc("A threshold of live range size which may cause "
                              "high compile time cost in global splitting."),
                     cl::init(5000));

TargetRegisterInfo::TargetRegisterInfo(const TargetRegisterInfoDesc *ID,
                                       regclass_iterator RCB,
                                       regclass_iterator RCE,
                                       const char *const *SRINames,
                                       const LaneBitmask *SRILaneMasks,
                                       LaneBitmask SRICoveringLanes,
                                       const RegClassInfo *const RCIs,
                                       unsigned Mode)
    : InfoDesc(ID), SubRegIndexNames(SRINames),
      SubRegIndexLaneMasks(SRILaneMasks),
      RegClassBegin(RCB), RegClassEnd(RCE),
      CoveringLanes(SRICoveringLanes),
      RCInfos(RCIs), HwMode(Mode) {
}

TargetRegisterInfo::~TargetRegisterInfo() = default;

bool TargetRegisterInfo::shouldRegionSplitForVirtReg(
    const MachineFunction &MF, const LiveInterval &VirtReg) const {
  const TargetInstrInfo *TII = MF.getSubtarget().getInstrInfo();
  const MachineRegisterInfo &MRI = MF.getRegInfo();
  MachineInstr *MI = MRI.getUniqueVRegDef(VirtReg.reg());
  if (MI && TII->isTriviallyReMaterializable(*MI) &&
      VirtReg.size() > HugeSizeForSplit)
    return false;
  return true;
}
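
// For instance, with the default 'huge-size-for-split' threshold of 5000, a
// live range spanning more than 5000 slot indexes whose single definition is
// trivially rematerializable skips region splitting: rematerialization is
// cheaper than an expensive global split.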

void TargetRegisterInfo::markSuperRegs(BitVector &RegisterSet,
                                       MCRegister Reg) const {
  for (MCSuperRegIterator AI(Reg, this, true); AI.isValid(); ++AI)
    RegisterSet.set(*AI);
}
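
// Illustrative effect (X86-style names): markSuperRegs(Reserved, X86::AX)
// sets AX itself (IncludeSelf is true) as well as its super-registers EAX
// and RAX.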

bool TargetRegisterInfo::checkAllSuperRegsMarked(const BitVector &RegisterSet,
    ArrayRef<MCPhysReg> Exceptions) const {
  // Check that all super registers of reserved regs are reserved as well.
  BitVector Checked(getNumRegs());
  for (unsigned Reg : RegisterSet.set_bits()) {
    if (Checked[Reg])
      continue;
    for (MCSuperRegIterator SR(Reg, this); SR.isValid(); ++SR) {
      if (!RegisterSet[*SR] && !is_contained(Exceptions, Reg)) {
        dbgs() << "Error: Super register " << printReg(*SR, this)
               << " of reserved register " << printReg(Reg, this)
               << " is not reserved.\n";
        return false;
      }

      // Super registers are checked transitively, so remember each register
      // we have already visited to avoid a compile-time explosion in deep
      // register hierarchies.
      Checked.set(*SR);
    }
  }
  return true;
}
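
// For example (X86-style names), reserving AX without also reserving EAX and
// RAX makes this check fail with "Super register $eax of reserved register
// $ax is not reserved." unless AX is listed in Exceptions.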

namespace llvm {

Printable printReg(Register Reg, const TargetRegisterInfo *TRI,
                   unsigned SubIdx, const MachineRegisterInfo *MRI) {
  return Printable([Reg, TRI, SubIdx, MRI](raw_ostream &OS) {
    if (!Reg)
      OS << "$noreg";
    else if (Register::isStackSlot(Reg))
      OS << "SS#" << Register::stackSlot2Index(Reg);
    else if (Register::isVirtualRegister(Reg)) {
      StringRef Name = MRI ? MRI->getVRegName(Reg) : "";
      if (!Name.empty()) {
        OS << '%' << Name;
      } else {
        OS << '%' << Register::virtReg2Index(Reg);
      }
    } else if (!TRI)
      OS << '$' << "physreg" << Reg;
    else if (Reg < TRI->getNumRegs()) {
      OS << '$';
      printLowerCase(TRI->getName(Reg), OS);
    } else
      llvm_unreachable("Register kind is unsupported.");

    if (SubIdx) {
      if (TRI)
        OS << ':' << TRI->getSubRegIndexName(SubIdx);
      else
        OS << ":sub(" << SubIdx << ')';
    }
  });
}
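
// Example output: printReg(V, TRI, 0, &MRI) yields "%foo" for a named virtual
// register, "%42" for an unnamed one, a lowercase "$eax"-style name for a
// physical register when TRI is available, and "$noreg" for no register; a
// nonzero SubIdx appends ":<subregindexname>".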

Printable printRegUnit(unsigned Unit, const TargetRegisterInfo *TRI) {
  return Printable([Unit, TRI](raw_ostream &OS) {
    // Generic printout when TRI is missing.
    if (!TRI) {
      OS << "Unit~" << Unit;
      return;
    }

    // Check for invalid register units.
    if (Unit >= TRI->getNumRegUnits()) {
      OS << "BadUnit~" << Unit;
      return;
    }

    // Normal units have at least one root.
    MCRegUnitRootIterator Roots(Unit, TRI);
    assert(Roots.isValid() && "Unit has no roots.");
    OS << TRI->getName(*Roots);
    for (++Roots; Roots.isValid(); ++Roots)
      OS << '~' << TRI->getName(*Roots);
  });
}
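
// A unit with a single root register R prints as "R"; a unit shared by two
// root registers (an ad-hoc alias) prints both names joined by '~', e.g.
// "r0~r1" (illustrative names).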

Printable printVRegOrUnit(unsigned Unit, const TargetRegisterInfo *TRI) {
  return Printable([Unit, TRI](raw_ostream &OS) {
    if (Register::isVirtualRegister(Unit)) {
      OS << '%' << Register::virtReg2Index(Unit);
    } else {
      OS << printRegUnit(Unit, TRI);
    }
  });
}

Printable printRegClassOrBank(Register Reg, const MachineRegisterInfo &RegInfo,
                              const TargetRegisterInfo *TRI) {
  return Printable([Reg, &RegInfo, TRI](raw_ostream &OS) {
    if (RegInfo.getRegClassOrNull(Reg))
      OS << StringRef(TRI->getRegClassName(RegInfo.getRegClass(Reg))).lower();
    else if (RegInfo.getRegBankOrNull(Reg))
      OS << StringRef(RegInfo.getRegBankOrNull(Reg)->getName()).lower();
    else {
      OS << "_";
      assert((RegInfo.def_empty(Reg) || RegInfo.getType(Reg).isValid()) &&
             "Generic registers must have a valid type");
    }
  });
}

} // end namespace llvm

/// getAllocatableClass - Return the maximal subclass of the given register
/// class that is allocatable, or null.
const TargetRegisterClass *
TargetRegisterInfo::getAllocatableClass(const TargetRegisterClass *RC) const {
  if (!RC || RC->isAllocatable())
    return RC;

  for (BitMaskClassIterator It(RC->getSubClassMask(), *this); It.isValid();
       ++It) {
    const TargetRegisterClass *SubRC = getRegClass(It.getID());
    if (SubRC->isAllocatable())
      return SubRC;
  }
  return nullptr;
}

/// getMinimalPhysRegClass - Returns the register class of a physical
/// register of the given type, picking the smallest (most specific) register
/// class of the right type that contains this physreg.
const TargetRegisterClass *
TargetRegisterInfo::getMinimalPhysRegClass(MCRegister reg, MVT VT) const {
  assert(Register::isPhysicalRegister(reg) &&
         "reg must be a physical register");

  // Pick the smallest register class of the right type that contains
  // this physreg.
  const TargetRegisterClass *BestRC = nullptr;
  for (const TargetRegisterClass *RC : regclasses()) {
    if ((VT == MVT::Other || isTypeLegalForClass(*RC, VT)) &&
        RC->contains(reg) && (!BestRC || BestRC->hasSubClass(RC)))
      BestRC = RC;
  }

  assert(BestRC && "Couldn't find the register class");
  return BestRC;
}
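
// Illustrative (X86-style names): for EAX with VT == MVT::Other, the loop
// visits every class containing EAX and keeps descending into sub-classes, so
// a narrow class such as GR32_ABCD wins over the broader GR32.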

const TargetRegisterClass *
TargetRegisterInfo::getMinimalPhysRegClassLLT(MCRegister reg, LLT Ty) const {
  assert(Register::isPhysicalRegister(reg) &&
         "reg must be a physical register");

  // Pick the smallest register class of the right type that contains
  // this physreg.
  const TargetRegisterClass *BestRC = nullptr;
  for (const TargetRegisterClass *RC : regclasses()) {
    if ((!Ty.isValid() || isTypeLegalForClass(*RC, Ty)) && RC->contains(reg) &&
        (!BestRC || BestRC->hasSubClass(RC)))
      BestRC = RC;
  }

  return BestRC;
}

/// getAllocatableSetForRC - Toggle the bits that represent allocatable
/// registers for the specific register class.
static void getAllocatableSetForRC(const MachineFunction &MF,
                                   const TargetRegisterClass *RC,
                                   BitVector &R) {
  assert(RC->isAllocatable() && "invalid for nonallocatable sets");
  for (MCPhysReg PhysReg : RC->getRawAllocationOrder(MF))
    R.set(PhysReg);
}

BitVector TargetRegisterInfo::getAllocatableSet(const MachineFunction &MF,
                                                const TargetRegisterClass *RC) const {
  BitVector Allocatable(getNumRegs());
  if (RC) {
    // A register class with no allocatable subclass returns an empty set.
    const TargetRegisterClass *SubClass = getAllocatableClass(RC);
    if (SubClass)
      getAllocatableSetForRC(MF, SubClass, Allocatable);
  } else {
    for (const TargetRegisterClass *C : regclasses())
      if (C->isAllocatable())
        getAllocatableSetForRC(MF, C, Allocatable);
  }

  // Mask out the reserved registers.
  const MachineRegisterInfo &MRI = MF.getRegInfo();
  const BitVector &Reserved = MRI.getReservedRegs();
  Allocatable.reset(Reserved);

  return Allocatable;
}

static inline
const TargetRegisterClass *firstCommonClass(const uint32_t *A,
                                            const uint32_t *B,
                                            const TargetRegisterInfo *TRI) {
  for (unsigned I = 0, E = TRI->getNumRegClasses(); I < E; I += 32)
    if (unsigned Common = *A++ & *B++)
      return TRI->getRegClass(I + countTrailingZeros(Common));
  return nullptr;
}
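
// The class masks are bitsets with one bit per register class, packed 32 per
// word. Scanning word by word and taking the lowest set bit of the first
// nonzero intersection yields the common class with the smallest ID; since
// classes are ordered topologically, that is the largest common sub-class.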

const TargetRegisterClass *
TargetRegisterInfo::getCommonSubClass(const TargetRegisterClass *A,
                                      const TargetRegisterClass *B) const {
  // First take care of the trivial cases.
  if (A == B)
    return A;
  if (!A || !B)
    return nullptr;

  // Register classes are ordered topologically, so the largest common
  // sub-class is the common sub-class with the smallest ID.
  return firstCommonClass(A->getSubClassMask(), B->getSubClassMask(), this);
}

const TargetRegisterClass *
TargetRegisterInfo::getMatchingSuperRegClass(const TargetRegisterClass *A,
                                             const TargetRegisterClass *B,
                                             unsigned Idx) const {
  assert(A && B && "Missing register class");
  assert(Idx && "Bad sub-register index");

  // Find Idx in the list of super-register indices.
  for (SuperRegClassIterator RCI(B, this); RCI.isValid(); ++RCI)
    if (RCI.getSubReg() == Idx)
      // The bit mask contains all register classes that are projected into B
      // by Idx. Find a class that is also a sub-class of A.
      return firstCommonClass(RCI.getMask(), A->getSubClassMask(), this);
  return nullptr;
}

const TargetRegisterClass *TargetRegisterInfo::
getCommonSuperRegClass(const TargetRegisterClass *RCA, unsigned SubA,
                       const TargetRegisterClass *RCB, unsigned SubB,
                       unsigned &PreA, unsigned &PreB) const {
  assert(RCA && SubA && RCB && SubB && "Invalid arguments");

  // Search all pairs of sub-register indices that project into RCA and RCB
  // respectively. This is quadratic, but usually the sets are very small. On
  // most targets like X86, there will only be a single sub-register index
  // (e.g., sub_16bit projecting into GR16).
  //
  // The worst case is a register class like DPR on ARM.
  // We have indices dsub_0..dsub_7 projecting into that class.
  //
  // It is very common that one register class is a sub-register of the other.
  // Arrange for RCA to be the larger register class so the answer will be
  // found in the first iteration. This makes the search linear for the most
  // common case.
  const TargetRegisterClass *BestRC = nullptr;
  unsigned *BestPreA = &PreA;
  unsigned *BestPreB = &PreB;
  if (getRegSizeInBits(*RCA) < getRegSizeInBits(*RCB)) {
    std::swap(RCA, RCB);
    std::swap(SubA, SubB);
    std::swap(BestPreA, BestPreB);
  }

  // Also terminate the search once we have found a register class as small as
  // RCA.
  unsigned MinSize = getRegSizeInBits(*RCA);

  for (SuperRegClassIterator IA(RCA, this, true); IA.isValid(); ++IA) {
    unsigned FinalA = composeSubRegIndices(IA.getSubReg(), SubA);
    for (SuperRegClassIterator IB(RCB, this, true); IB.isValid(); ++IB) {
      // Check if a common super-register class exists for this index pair.
      const TargetRegisterClass *RC =
          firstCommonClass(IA.getMask(), IB.getMask(), this);
      if (!RC || getRegSizeInBits(*RC) < MinSize)
        continue;

      // The indexes must compose identically: PreA+SubA == PreB+SubB.
      unsigned FinalB = composeSubRegIndices(IB.getSubReg(), SubB);
      if (FinalA != FinalB)
        continue;

      // Is RC a better candidate than BestRC?
      if (BestRC && getRegSizeInBits(*RC) >= getRegSizeInBits(*BestRC))
        continue;

      // Yes, RC is the smallest super-register class seen so far.
      BestRC = RC;
      *BestPreA = IA.getSubReg();
      *BestPreB = IB.getSubReg();

      // Bail early if we reached MinSize. We won't find a better candidate.
      if (getRegSizeInBits(*BestRC) == MinSize)
        return BestRC;
    }
  }
  return BestRC;
}
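
// Illustrative call shape (variable names hypothetical): a caller such as the
// coalescer asks whether two sub-register operands can live in one class:
//   unsigned PreA, PreB;
//   if (const TargetRegisterClass *RC =
//           TRI->getCommonSuperRegClass(RCA, SubA, RCB, SubB, PreA, PreB))
//     ; // RC:PreA+SubA and RC:PreB+SubB name the same lanes of an RC reg.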

/// Check if the registers defined by the pair (RegisterClass, SubReg)
/// share the same register file.
static bool shareSameRegisterFile(const TargetRegisterInfo &TRI,
                                  const TargetRegisterClass *DefRC,
                                  unsigned DefSubReg,
                                  const TargetRegisterClass *SrcRC,
                                  unsigned SrcSubReg) {
  // Same register class.
  if (DefRC == SrcRC)
    return true;

  // Both operands are sub registers. Check if they share a register class.
  unsigned SrcIdx, DefIdx;
  if (SrcSubReg && DefSubReg) {
    return TRI.getCommonSuperRegClass(SrcRC, SrcSubReg, DefRC, DefSubReg,
                                      SrcIdx, DefIdx) != nullptr;
  }

  // At most one of the registers is a sub register; make it Src to avoid
  // duplicating the test.
  if (!SrcSubReg) {
    std::swap(DefSubReg, SrcSubReg);
    std::swap(DefRC, SrcRC);
  }

  // One of the registers is a sub register; check if we can get a superclass.
  if (SrcSubReg)
    return TRI.getMatchingSuperRegClass(SrcRC, DefRC, SrcSubReg) != nullptr;

  // Plain copy.
  return TRI.getCommonSubClass(DefRC, SrcRC) != nullptr;
}

bool TargetRegisterInfo::shouldRewriteCopySrc(const TargetRegisterClass *DefRC,
                                              unsigned DefSubReg,
                                              const TargetRegisterClass *SrcRC,
                                              unsigned SrcSubReg) const {
  // If this source does not incur a cross register bank copy, use it.
  return shareSameRegisterFile(*this, DefRC, DefSubReg, SrcRC, SrcSubReg);
}

// Compute target-independent register allocator hints to help eliminate
// copies.
bool TargetRegisterInfo::getRegAllocationHints(
    Register VirtReg, ArrayRef<MCPhysReg> Order,
    SmallVectorImpl<MCPhysReg> &Hints, const MachineFunction &MF,
    const VirtRegMap *VRM, const LiveRegMatrix *Matrix) const {
  const MachineRegisterInfo &MRI = MF.getRegInfo();
  const std::pair<Register, SmallVector<Register, 4>> &Hints_MRI =
      MRI.getRegAllocationHints(VirtReg);

  SmallSet<Register, 32> HintedRegs;
  // A nonzero hint type means the first hint is target-specific; it is
  // interpreted by a target override of this function, so skip it here.
  bool Skip = (Hints_MRI.first != 0);
  for (auto Reg : Hints_MRI.second) {
    if (Skip) {
      Skip = false;
      continue;
    }

    // Target-independent hints are either a physical or a virtual register.
    Register Phys = Reg;
    if (VRM && Phys.isVirtual())
      Phys = VRM->getPhys(Phys);

    // Don't add the same reg twice (Hints_MRI may contain multiple virtual
    // registers allocated to the same physreg).
    if (!HintedRegs.insert(Phys).second)
      continue;
    // Check that Phys is a valid hint in VirtReg's register class.
    if (!Phys.isPhysical())
      continue;
    if (MRI.isReserved(Phys))
      continue;
    // Check that Phys is in the allocation order. We shouldn't heed hints
    // from VirtReg's register class if they aren't in the allocation order.
    // The target probably has a reason for removing the register.
    if (!is_contained(Order, Phys))
      continue;

    // All clear, tell the register allocator to prefer this register.
    Hints.push_back(Phys);
  }
  return false;
}

bool TargetRegisterInfo::isCalleeSavedPhysReg(
    MCRegister PhysReg, const MachineFunction &MF) const {
  if (PhysReg == 0)
    return false;
  const uint32_t *callerPreservedRegs =
      getCallPreservedMask(MF, MF.getFunction().getCallingConv());
  if (callerPreservedRegs) {
    assert(Register::isPhysicalRegister(PhysReg) &&
           "Expected physical register");
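    // The mask packs one bit per register, 32 bits per word; e.g. register
    // number 37 tests bit 5 (37 % 32) of word 1 (37 / 32).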
    return (callerPreservedRegs[PhysReg / 32] >> PhysReg % 32) & 1;
  }
  return false;
}

bool TargetRegisterInfo::canRealignStack(const MachineFunction &MF) const {
  return !MF.getFunction().hasFnAttribute("no-realign-stack");
}

bool TargetRegisterInfo::shouldRealignStack(const MachineFunction &MF) const {
  const MachineFrameInfo &MFI = MF.getFrameInfo();
  const TargetFrameLowering *TFI = MF.getSubtarget().getFrameLowering();
  const Function &F = MF.getFunction();
  return F.hasFnAttribute("stackrealign") ||
         (MFI.getMaxAlign() > TFI->getStackAlign()) ||
         F.hasFnAttribute(Attribute::StackAlignment);
}

bool TargetRegisterInfo::regmaskSubsetEqual(const uint32_t *mask0,
                                            const uint32_t *mask1) const {
  unsigned N = (getNumRegs() + 31) / 32;
  for (unsigned I = 0; I < N; ++I)
    if ((mask0[I] & mask1[I]) != mask0[I])
      return false;
  return true;
}
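
// In other words: returns true iff every register preserved by mask0 is also
// preserved by mask1, i.e. mask0 is a bit-wise subset of mask1.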

unsigned
TargetRegisterInfo::getRegSizeInBits(Register Reg,
                                     const MachineRegisterInfo &MRI) const {
  const TargetRegisterClass *RC{};
  if (Reg.isPhysical()) {
    // The size is not directly available for physical registers.
    // Instead, we need to access a register class that contains Reg and
    // get the size of that register class.
    RC = getMinimalPhysRegClass(Reg);
  } else {
    // Generic virtual registers carry their size in their LLT type.
    LLT Ty = MRI.getType(Reg);
    unsigned RegSize = Ty.isValid() ? Ty.getSizeInBits() : 0;
    if (RegSize)
      return RegSize;
    // Otherwise Reg is not a generic register and must have a register class.
    RC = MRI.getRegClass(Reg);
  }
  assert(RC && "Unable to deduce the register class");
  return getRegSizeInBits(*RC);
}

bool TargetRegisterInfo::getCoveringSubRegIndexes(
    const MachineRegisterInfo &MRI, const TargetRegisterClass *RC,
    LaneBitmask LaneMask, SmallVectorImpl<unsigned> &NeededIndexes) const {
  SmallVector<unsigned, 8> PossibleIndexes;
  unsigned BestIdx = 0;
  unsigned BestCover = 0;

  for (unsigned Idx = 1, E = getNumSubRegIndices(); Idx < E; ++Idx) {
    // Is this index even compatible with the given class?
    if (getSubClassWithSubReg(RC, Idx) != RC)
      continue;
    LaneBitmask SubRegMask = getSubRegIndexLaneMask(Idx);
    // Early exit if we found a perfect match.
    if (SubRegMask == LaneMask) {
      BestIdx = Idx;
      break;
    }

    // The index must not cover any lanes outside \p LaneMask.
    if ((SubRegMask & ~LaneMask).any())
      continue;

    unsigned PopCount = SubRegMask.getNumLanes();
    PossibleIndexes.push_back(Idx);
    if (PopCount > BestCover) {
      BestCover = PopCount;
      BestIdx = Idx;
    }
  }

  // Abort if we cannot possibly implement the COPY with the given indexes.
  if (BestIdx == 0)
    return false;

  NeededIndexes.push_back(BestIdx);

  // Greedy heuristic: keep iterating, taking the best covering subreg index
  // each time.
  LaneBitmask LanesLeft = LaneMask & ~getSubRegIndexLaneMask(BestIdx);
  while (LanesLeft.any()) {
    unsigned BestIdx = 0;
    int BestCover = std::numeric_limits<int>::min();
    for (unsigned Idx : PossibleIndexes) {
      LaneBitmask SubRegMask = getSubRegIndexLaneMask(Idx);
      // Early exit if we found a perfect match.
      if (SubRegMask == LanesLeft) {
        BestIdx = Idx;
        break;
      }

      // Try to cover as much of the remaining lanes as possible but
      // as few of the already covered lanes as possible.
      int Cover = (SubRegMask & LanesLeft).getNumLanes() -
                  (SubRegMask & ~LanesLeft).getNumLanes();
      if (Cover > BestCover) {
        BestCover = Cover;
        BestIdx = Idx;
      }
    }

    if (BestIdx == 0)
      return false; // Impossible to handle.

    NeededIndexes.push_back(BestIdx);

    LanesLeft &= ~getSubRegIndexLaneMask(BestIdx);
  }

  return true;
}
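
// Illustrative (ARM-style names): within a QPR class, the combined lanes of
// ssub_0 and ssub_1 can be covered by the single index dsub_0, whereas
// ssub_0 plus ssub_2 needs two indexes, so NeededIndexes would receive both.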

Register
TargetRegisterInfo::lookThruCopyLike(Register SrcReg,
                                     const MachineRegisterInfo *MRI) const {
  while (true) {
    const MachineInstr *MI = MRI->getVRegDef(SrcReg);
    if (!MI->isCopyLike())
      return SrcReg;

    Register CopySrcReg;
    if (MI->isCopy())
      CopySrcReg = MI->getOperand(1).getReg();
    else {
      assert(MI->isSubregToReg() && "Bad opcode for lookThruCopyLike");
      CopySrcReg = MI->getOperand(2).getReg();
    }

    if (!CopySrcReg.isVirtual())
      return CopySrcReg;

    SrcReg = CopySrcReg;
  }
}
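
// Illustrative chain (MIR-style, names hypothetical):
//   %1 = COPY %0
//   %2 = SUBREG_TO_REG 0, %1, %subreg.sub_32
// lookThruCopyLike(%2, MRI) walks through both instructions and returns %0,
// or stops early at the first non-virtual (physical) source it meets.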
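// Like lookThruCopyLike, but every copy in the chain (and the final def) must
// have a single non-debug use; otherwise an invalid Register() is returned.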
Register TargetRegisterInfo::lookThruSingleUseCopyChain(
    Register SrcReg, const MachineRegisterInfo *MRI) const {
  while (true) {
    const MachineInstr *MI = MRI->getVRegDef(SrcReg);
    // Found the real definition, return it if it has a single use.
    if (!MI->isCopyLike())
      return MRI->hasOneNonDBGUse(SrcReg) ? SrcReg : Register();

    Register CopySrcReg;
    if (MI->isCopy())
      CopySrcReg = MI->getOperand(1).getReg();
    else {
      assert(MI->isSubregToReg() && "Bad opcode for lookThruCopyLike");
      CopySrcReg = MI->getOperand(2).getReg();
    }

    // Continue only if the next definition in the chain is for a virtual
    // register that has a single use.
    if (!CopySrcReg.isVirtual() || !MRI->hasOneNonDBGUse(CopySrcReg))
      return Register();

    SrcReg = CopySrcReg;
  }
}

void TargetRegisterInfo::getOffsetOpcodes(
    const StackOffset &Offset, SmallVectorImpl<uint64_t> &Ops) const {
  assert(!Offset.getScalable() && "Scalable offsets are not handled");
  DIExpression::appendOffset(Ops, Offset.getFixed());
}

DIExpression *
TargetRegisterInfo::prependOffsetExpression(const DIExpression *Expr,
                                            unsigned PrependFlags,
                                            const StackOffset &Offset) const {
  assert((PrependFlags &
          ~(DIExpression::DerefBefore | DIExpression::DerefAfter |
            DIExpression::StackValue | DIExpression::EntryValue)) == 0 &&
         "Unsupported prepend flag");
  SmallVector<uint64_t, 16> OffsetExpr;
  if (PrependFlags & DIExpression::DerefBefore)
    OffsetExpr.push_back(dwarf::DW_OP_deref);
  getOffsetOpcodes(Offset, OffsetExpr);
  if (PrependFlags & DIExpression::DerefAfter)
    OffsetExpr.push_back(dwarf::DW_OP_deref);
  return DIExpression::prependOpcodes(Expr, OffsetExpr,
                                      PrependFlags & DIExpression::StackValue,
                                      PrependFlags & DIExpression::EntryValue);
}
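
// For example, PrependFlags = DIExpression::DerefBefore with a fixed offset
// of 8 prepends [DW_OP_deref, DW_OP_plus_uconst 8] (the latter produced by
// DIExpression::appendOffset) to the existing expression.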

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
LLVM_DUMP_METHOD
void TargetRegisterInfo::dumpReg(Register Reg, unsigned SubRegIndex,
                                 const TargetRegisterInfo *TRI) {
  dbgs() << printReg(Reg, TRI, SubRegIndex) << "\n";
}
#endif