1 //==- CodeGen/TargetRegisterInfo.h - Target Register Information -*- C++ -*-==//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This file describes an abstract interface used to get information about a
// target machine's register file.  This information is used for a variety of
// purposes, especially register allocation.
12 //
13 //===----------------------------------------------------------------------===//
14 
15 #ifndef LLVM_CODEGEN_TARGETREGISTERINFO_H
16 #define LLVM_CODEGEN_TARGETREGISTERINFO_H
17 
18 #include "llvm/ADT/ArrayRef.h"
19 #include "llvm/ADT/SmallVector.h"
20 #include "llvm/ADT/StringRef.h"
21 #include "llvm/ADT/iterator_range.h"
22 #include "llvm/CodeGen/MachineBasicBlock.h"
23 #include "llvm/CodeGen/RegisterBank.h"
24 #include "llvm/IR/CallingConv.h"
25 #include "llvm/MC/LaneBitmask.h"
26 #include "llvm/MC/MCRegisterInfo.h"
27 #include "llvm/Support/ErrorHandling.h"
28 #include "llvm/Support/MathExtras.h"
29 #include "llvm/Support/Printable.h"
30 #include <cassert>
31 #include <cstdint>
32 
33 namespace llvm {
34 
35 class BitVector;
36 class DIExpression;
37 class LiveRegMatrix;
38 class MachineFunction;
39 class MachineInstr;
40 class RegScavenger;
41 class VirtRegMap;
42 class LiveIntervals;
43 class LiveInterval;
44 
45 class TargetRegisterClass {
46 public:
47   using iterator = const MCPhysReg *;
48   using const_iterator = const MCPhysReg *;
49   using sc_iterator = const TargetRegisterClass* const *;
50 
51   // Instance variables filled by tablegen, do not use!
52   const MCRegisterClass *MC;
53   const uint32_t *SubClassMask;
54   const uint16_t *SuperRegIndices;
55   const LaneBitmask LaneMask;
56   /// Classes with a higher priority value are assigned first by register
57   /// allocators using a greedy heuristic. The value is in the range [0,31].
58   const uint8_t AllocationPriority;
59 
60   // Change allocation priority heuristic used by greedy.
61   const bool GlobalPriority;
62 
63   /// Configurable target specific flags.
64   const uint8_t TSFlags;
65   /// Whether the class supports two (or more) disjunct subregister indices.
66   const bool HasDisjunctSubRegs;
67   /// Whether a combination of subregisters can cover every register in the
68   /// class. See also the CoveredBySubRegs description in Target.td.
69   const bool CoveredBySubRegs;
70   const sc_iterator SuperClasses;
71   ArrayRef<MCPhysReg> (*OrderFunc)(const MachineFunction&);
72 
73   /// Return the register class ID number.
74   unsigned getID() const { return MC->getID(); }
75 
76   /// begin/end - Return all of the registers in this class.
77   ///
78   iterator       begin() const { return MC->begin(); }
79   iterator         end() const { return MC->end(); }
80 
81   /// Return the number of registers in this class.
82   unsigned getNumRegs() const { return MC->getNumRegs(); }
83 
84   iterator_range<SmallVectorImpl<MCPhysReg>::const_iterator>
85   getRegisters() const {
86     return make_range(MC->begin(), MC->end());
87   }
88 
89   /// Return the specified register in the class.
90   MCRegister getRegister(unsigned i) const {
91     return MC->getRegister(i);
92   }
93 
94   /// Return true if the specified register is included in this register class.
95   /// This does not include virtual registers.
96   bool contains(Register Reg) const {
97     /// FIXME: Historically this function has returned false when given vregs
98     ///        but it should probably only receive physical registers
99     if (!Reg.isPhysical())
100       return false;
101     return MC->contains(Reg.asMCReg());
102   }
103 
104   /// Return true if both registers are in this class.
105   bool contains(Register Reg1, Register Reg2) const {
    /// FIXME: Historically this function has returned false when given vregs
107     ///        but it should probably only receive physical registers
108     if (!Reg1.isPhysical() || !Reg2.isPhysical())
109       return false;
110     return MC->contains(Reg1.asMCReg(), Reg2.asMCReg());
111   }
112 
113   /// Return the cost of copying a value between two registers in this class.
114   /// A negative number means the register class is very expensive
  /// to copy, e.g. status flag register classes.
116   int getCopyCost() const { return MC->getCopyCost(); }
117 
118   /// Return true if this register class may be used to create virtual
119   /// registers.
120   bool isAllocatable() const { return MC->isAllocatable(); }
121 
122   /// Return true if the specified TargetRegisterClass
123   /// is a proper sub-class of this TargetRegisterClass.
124   bool hasSubClass(const TargetRegisterClass *RC) const {
125     return RC != this && hasSubClassEq(RC);
126   }
127 
128   /// Returns true if RC is a sub-class of or equal to this class.
129   bool hasSubClassEq(const TargetRegisterClass *RC) const {
130     unsigned ID = RC->getID();
131     return (SubClassMask[ID / 32] >> (ID % 32)) & 1;
132   }
133 
134   /// Return true if the specified TargetRegisterClass is a
135   /// proper super-class of this TargetRegisterClass.
136   bool hasSuperClass(const TargetRegisterClass *RC) const {
137     return RC->hasSubClass(this);
138   }
139 
140   /// Returns true if RC is a super-class of or equal to this class.
141   bool hasSuperClassEq(const TargetRegisterClass *RC) const {
142     return RC->hasSubClassEq(this);
143   }
144 
145   /// Returns a bit vector of subclasses, including this one.
146   /// The vector is indexed by class IDs.
147   ///
148   /// To use it, consider the returned array as a chunk of memory that
149   /// contains an array of bits of size NumRegClasses. Each 32-bit chunk
  /// contains a bitset of the IDs of the subclasses in big-endian style.
  ///
  /// I.e., the representation of the memory from left to right at the
  /// bit level looks like:
  /// [31 30 ... 1 0] [ 63 62 ... 33 32] ...
  ///                     [ XXX NumRegClasses NumRegClasses - 1 ... ]
  /// where each number represents a class ID and XXX marks bits that
  /// should be ignored.
158   ///
159   /// See the implementation of hasSubClassEq for an example of how it
160   /// can be used.
161   const uint32_t *getSubClassMask() const {
162     return SubClassMask;
163   }
164 
165   /// Returns a 0-terminated list of sub-register indices that project some
166   /// super-register class into this register class. The list has an entry for
167   /// each Idx such that:
168   ///
169   ///   There exists SuperRC where:
170   ///     For all Reg in SuperRC:
171   ///       this->contains(Reg:Idx)
172   const uint16_t *getSuperRegIndices() const {
173     return SuperRegIndices;
174   }
175 
176   /// Returns a NULL-terminated list of super-classes.  The
177   /// classes are ordered by ID which is also a topological ordering from large
178   /// to small classes.  The list does NOT include the current class.
179   sc_iterator getSuperClasses() const {
180     return SuperClasses;
181   }
182 
183   /// Return true if this TargetRegisterClass is a subset
184   /// class of at least one other TargetRegisterClass.
185   bool isASubClass() const {
186     return SuperClasses[0] != nullptr;
187   }
188 
189   /// Returns the preferred order for allocating registers from this register
190   /// class in MF. The raw order comes directly from the .td file and may
191   /// include reserved registers that are not allocatable.
192   /// Register allocators should also make sure to allocate
193   /// callee-saved registers only after all the volatiles are used. The
194   /// RegisterClassInfo class provides filtered allocation orders with
195   /// callee-saved registers moved to the end.
196   ///
197   /// The MachineFunction argument can be used to tune the allocatable
198   /// registers based on the characteristics of the function, subtarget, or
199   /// other criteria.
200   ///
201   /// By default, this method returns all registers in the class.
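  ///
  /// A minimal usage sketch (RC, MF and MRI are assumed to be a register class
  /// pointer, the current MachineFunction and its MachineRegisterInfo; they are
  /// not part of this interface):
  /// \code
  ///   for (MCPhysReg PhysReg : RC->getRawAllocationOrder(MF))
  ///     if (!MRI.isReserved(PhysReg)) {
  ///       // PhysReg is a raw candidate; reserved registers are skipped.
  ///     }
  /// \endcode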
202   ArrayRef<MCPhysReg> getRawAllocationOrder(const MachineFunction &MF) const {
203     return OrderFunc ? OrderFunc(MF) : ArrayRef(begin(), getNumRegs());
204   }
205 
  /// Returns the combination of all lane masks of registers in this class.
207   /// The lane masks of the registers are the combination of all lane masks
208   /// of their subregisters. Returns 1 if there are no subregisters.
209   LaneBitmask getLaneMask() const {
210     return LaneMask;
211   }
212 };
213 
214 /// Extra information, not in MCRegisterDesc, about registers.
215 /// These are used by codegen, not by MC.
216 struct TargetRegisterInfoDesc {
217   const uint8_t *CostPerUse; // Extra cost of instructions using register.
218   unsigned NumCosts; // Number of cost values associated with each register.
219   const bool
220       *InAllocatableClass; // Register belongs to an allocatable regclass.
221 };
222 
223 /// Each TargetRegisterClass has a per register weight, and weight
224 /// limit which must be less than the limits of its pressure sets.
225 struct RegClassWeight {
226   unsigned RegWeight;
227   unsigned WeightLimit;
228 };
229 
230 /// TargetRegisterInfo base class - We assume that the target defines a static
231 /// array of TargetRegisterDesc objects that represent all of the machine
232 /// registers that the target has.  As such, we simply have to track a pointer
233 /// to this array so that we can turn register number into a register
234 /// descriptor.
235 ///
236 class TargetRegisterInfo : public MCRegisterInfo {
237 public:
238   using regclass_iterator = const TargetRegisterClass * const *;
239   using vt_iterator = const MVT::SimpleValueType *;
240   struct RegClassInfo {
241     unsigned RegSize, SpillSize, SpillAlignment;
242     vt_iterator VTList;
243   };
244 private:
245   const TargetRegisterInfoDesc *InfoDesc;     // Extra desc array for codegen
246   const char *const *SubRegIndexNames;        // Names of subreg indexes.
247   // Pointer to array of lane masks, one per sub-reg index.
248   const LaneBitmask *SubRegIndexLaneMasks;
249 
250   regclass_iterator RegClassBegin, RegClassEnd;   // List of regclasses
251   LaneBitmask CoveringLanes;
252   const RegClassInfo *const RCInfos;
253   unsigned HwMode;
254 
255 protected:
256   TargetRegisterInfo(const TargetRegisterInfoDesc *ID,
257                      regclass_iterator RCB,
258                      regclass_iterator RCE,
259                      const char *const *SRINames,
260                      const LaneBitmask *SRILaneMasks,
261                      LaneBitmask CoveringLanes,
262                      const RegClassInfo *const RCIs,
263                      unsigned Mode = 0);
264   virtual ~TargetRegisterInfo();
265 
266 public:
267   // Register numbers can represent physical registers, virtual registers, and
268   // sometimes stack slots. The unsigned values are divided into these ranges:
269   //
270   //   0           Not a register, can be used as a sentinel.
271   //   [1;2^30)    Physical registers assigned by TableGen.
272   //   [2^30;2^31) Stack slots. (Rarely used.)
273   //   [2^31;2^32) Virtual registers assigned by MachineRegisterInfo.
274   //
275   // Further sentinels can be allocated from the small negative integers.
276   // DenseMapInfo<unsigned> uses -1u and -2u.
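  //
  // For example (a sketch; RawValue is a hypothetical encoded register
  // number), the Register predicates follow this partition:
  //
  //   Register R(RawValue);
  //   bool IsPhys = R.isPhysical(); // true for [1;2^30)
  //   bool IsVirt = R.isVirtual();  // true for [2^31;2^32)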
277 
278   /// Return the size in bits of a register from class RC.
279   unsigned getRegSizeInBits(const TargetRegisterClass &RC) const {
280     return getRegClassInfo(RC).RegSize;
281   }
282 
283   /// Return the size in bytes of the stack slot allocated to hold a spilled
284   /// copy of a register from class RC.
285   unsigned getSpillSize(const TargetRegisterClass &RC) const {
286     return getRegClassInfo(RC).SpillSize / 8;
287   }
288 
289   /// Return the minimum required alignment in bytes for a spill slot for
290   /// a register of this class.
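  ///
  /// A sketch of typical use together with getSpillSize() when creating a
  /// spill slot (MFI is a MachineFrameInfo assumed to be in scope):
  /// \code
  ///   int FI = MFI.CreateSpillStackObject(TRI->getSpillSize(*RC),
  ///                                       TRI->getSpillAlign(*RC));
  /// \endcode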
291   Align getSpillAlign(const TargetRegisterClass &RC) const {
292     return Align(getRegClassInfo(RC).SpillAlignment / 8);
293   }
294 
295   /// Return true if the given TargetRegisterClass has the ValueType T.
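  /// For example (sketch):
  /// \code
  ///   if (TRI->isTypeLegalForClass(*RC, MVT::f64)) {
  ///     // Values of type f64 can be assigned to registers of RC.
  ///   }
  /// \endcode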
296   bool isTypeLegalForClass(const TargetRegisterClass &RC, MVT T) const {
297     for (auto I = legalclasstypes_begin(RC); *I != MVT::Other; ++I)
298       if (MVT(*I) == T)
299         return true;
300     return false;
301   }
302 
303   /// Return true if the given TargetRegisterClass is compatible with LLT T.
304   bool isTypeLegalForClass(const TargetRegisterClass &RC, LLT T) const {
305     for (auto I = legalclasstypes_begin(RC); *I != MVT::Other; ++I) {
306       MVT VT(*I);
307       if (VT == MVT::Untyped)
308         return true;
309 
310       if (LLT(VT) == T)
311         return true;
312     }
313     return false;
314   }
315 
316   /// Loop over all of the value types that can be represented by values
317   /// in the given register class.
318   vt_iterator legalclasstypes_begin(const TargetRegisterClass &RC) const {
319     return getRegClassInfo(RC).VTList;
320   }
321 
322   vt_iterator legalclasstypes_end(const TargetRegisterClass &RC) const {
323     vt_iterator I = legalclasstypes_begin(RC);
324     while (*I != MVT::Other)
325       ++I;
326     return I;
327   }
328 
  /// Returns the Register Class of a physical register of the given type,
  /// picking the most specific sub register class of the right type that
  /// contains this physreg.
332   const TargetRegisterClass *getMinimalPhysRegClass(MCRegister Reg,
333                                                     MVT VT = MVT::Other) const;
334 
  /// Returns the Register Class of a physical register of the given type,
  /// picking the most specific sub register class of the right type that
  /// contains this physreg. If there is no register class compatible with the
  /// given type, returns nullptr.
339   const TargetRegisterClass *getMinimalPhysRegClassLLT(MCRegister Reg,
340                                                        LLT Ty = LLT()) const;
341 
342   /// Return the maximal subclass of the given register class that is
343   /// allocatable or NULL.
344   const TargetRegisterClass *
345     getAllocatableClass(const TargetRegisterClass *RC) const;
346 
347   /// Returns a bitset indexed by register number indicating if a register is
348   /// allocatable or not. If a register class is specified, returns the subset
349   /// for the class.
350   BitVector getAllocatableSet(const MachineFunction &MF,
351                               const TargetRegisterClass *RC = nullptr) const;
352 
  /// Get a list of cost values for all registers that correspond to the index
  /// returned by getRegisterCostTableIndex.
355   ArrayRef<uint8_t> getRegisterCosts(const MachineFunction &MF) const {
356     unsigned Idx = getRegisterCostTableIndex(MF);
357     unsigned NumRegs = getNumRegs();
358     assert(Idx < InfoDesc->NumCosts && "CostPerUse index out of bounds");
359 
360     return ArrayRef(&InfoDesc->CostPerUse[Idx * NumRegs], NumRegs);
361   }
362 
  /// Return true if the register is in any allocatable register class.
364   bool isInAllocatableClass(MCRegister RegNo) const {
365     return InfoDesc->InAllocatableClass[RegNo];
366   }
367 
368   /// Return the human-readable symbolic target-specific
369   /// name for the specified SubRegIndex.
370   const char *getSubRegIndexName(unsigned SubIdx) const {
371     assert(SubIdx && SubIdx < getNumSubRegIndices() &&
372            "This is not a subregister index");
373     return SubRegIndexNames[SubIdx-1];
374   }
375 
376   /// Return a bitmask representing the parts of a register that are covered by
377   /// SubIdx \see LaneBitmask.
378   ///
  /// SubIdx == 0 is allowed; it has the lane mask ~0u.
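  ///
  /// A common use (sketch) is checking whether two sub-registers of the same
  /// register may overlap:
  /// \code
  ///   LaneBitmask MaskA = TRI->getSubRegIndexLaneMask(SubA);
  ///   LaneBitmask MaskB = TRI->getSubRegIndexLaneMask(SubB);
  ///   bool MayOverlap = (MaskA & MaskB).any();
  /// \endcode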
380   LaneBitmask getSubRegIndexLaneMask(unsigned SubIdx) const {
381     assert(SubIdx < getNumSubRegIndices() && "This is not a subregister index");
382     return SubRegIndexLaneMasks[SubIdx];
383   }
384 
385   /// Try to find one or more subregister indexes to cover \p LaneMask.
386   ///
387   /// If this is possible, returns true and appends the best matching set of
388   /// indexes to \p Indexes. If this is not possible, returns false.
389   bool getCoveringSubRegIndexes(const MachineRegisterInfo &MRI,
390                                 const TargetRegisterClass *RC,
391                                 LaneBitmask LaneMask,
392                                 SmallVectorImpl<unsigned> &Indexes) const;
393 
394   /// The lane masks returned by getSubRegIndexLaneMask() above can only be
395   /// used to determine if sub-registers overlap - they can't be used to
396   /// determine if a set of sub-registers completely cover another
397   /// sub-register.
398   ///
399   /// The X86 general purpose registers have two lanes corresponding to the
400   /// sub_8bit and sub_8bit_hi sub-registers. Both sub_32bit and sub_16bit have
401   /// lane masks '3', but the sub_16bit sub-register doesn't fully cover the
402   /// sub_32bit sub-register.
403   ///
404   /// On the other hand, the ARM NEON lanes fully cover their registers: The
405   /// dsub_0 sub-register is completely covered by the ssub_0 and ssub_1 lanes.
406   /// This is related to the CoveredBySubRegs property on register definitions.
407   ///
408   /// This function returns a bit mask of lanes that completely cover their
409   /// sub-registers. More precisely, given:
410   ///
411   ///   Covering = getCoveringLanes();
412   ///   MaskA = getSubRegIndexLaneMask(SubA);
413   ///   MaskB = getSubRegIndexLaneMask(SubB);
414   ///
415   /// If (MaskA & ~(MaskB & Covering)) == 0, then SubA is completely covered by
416   /// SubB.
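  ///
  /// Expressed with LaneBitmask operations (a sketch of the same test):
  /// \code
  ///   LaneBitmask Covering = TRI->getCoveringLanes();
  ///   bool SubACoveredBySubB = (MaskA & ~(MaskB & Covering)).none();
  /// \endcode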
417   LaneBitmask getCoveringLanes() const { return CoveringLanes; }
418 
419   /// Returns true if the two registers are equal or alias each other.
420   /// The registers may be virtual registers.
421   bool regsOverlap(Register RegA, Register RegB) const {
422     if (RegA == RegB)
423       return true;
424     if (RegA.isPhysical() && RegB.isPhysical())
425       return MCRegisterInfo::regsOverlap(RegA.asMCReg(), RegB.asMCReg());
426     return false;
427   }
428 
429   /// Returns true if Reg contains RegUnit.
430   bool hasRegUnit(MCRegister Reg, Register RegUnit) const {
431     for (MCRegUnit Unit : regunits(Reg))
432       if (Register(Unit) == RegUnit)
433         return true;
434     return false;
435   }
436 
437   /// Returns the original SrcReg unless it is the target of a copy-like
438   /// operation, in which case we chain backwards through all such operations
439   /// to the ultimate source register.  If a physical register is encountered,
440   /// we stop the search.
441   virtual Register lookThruCopyLike(Register SrcReg,
442                                     const MachineRegisterInfo *MRI) const;
443 
444   /// Find the original SrcReg unless it is the target of a copy-like operation,
445   /// in which case we chain backwards through all such operations to the
446   /// ultimate source register. If a physical register is encountered, we stop
447   /// the search.
  /// Return the original SrcReg if all the definitions in the chain only have
  /// one user and are not physical registers.
450   virtual Register
451   lookThruSingleUseCopyChain(Register SrcReg,
452                              const MachineRegisterInfo *MRI) const;
453 
454   /// Return a null-terminated list of all of the callee-saved registers on
  /// this target. The registers should be in the order of desired callee-save
456   /// stack frame offset. The first register is closest to the incoming stack
457   /// pointer if stack grows down, and vice versa.
458   /// Notice: This function does not take into account disabled CSRs.
  ///         In most cases you will want to use the getCalleeSavedRegs
  ///         function implemented in MachineRegisterInfo instead.
461   virtual const MCPhysReg*
462   getCalleeSavedRegs(const MachineFunction *MF) const = 0;
463 
464   /// Return a mask of call-preserved registers for the given calling convention
465   /// on the current function. The mask should include all call-preserved
466   /// aliases. This is used by the register allocator to determine which
467   /// registers can be live across a call.
468   ///
469   /// The mask is an array containing (TRI::getNumRegs()+31)/32 entries.
470   /// A set bit indicates that all bits of the corresponding register are
471   /// preserved across the function call.  The bit mask is expected to be
472   /// sub-register complete, i.e. if A is preserved, so are all its
473   /// sub-registers.
474   ///
475   /// Bits are numbered from the LSB, so the bit for physical register Reg can
476   /// be found as (Mask[Reg / 32] >> Reg % 32) & 1.
477   ///
478   /// A NULL pointer means that no register mask will be used, and call
479   /// instructions should use implicit-def operands to indicate call clobbered
480   /// registers.
481   ///
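  /// For example, a sketch of testing whether the physical register numbered
  /// Reg is preserved across a call with the C calling convention:
  /// \code
  ///   const uint32_t *Mask = TRI->getCallPreservedMask(MF, CallingConv::C);
  ///   bool Preserved = Mask && ((Mask[Reg / 32] >> (Reg % 32)) & 1);
  /// \endcode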
482   virtual const uint32_t *getCallPreservedMask(const MachineFunction &MF,
483                                                CallingConv::ID) const {
484     // The default mask clobbers everything.  All targets should override.
485     return nullptr;
486   }
487 
488   /// Return a register mask for the registers preserved by the unwinder,
489   /// or nullptr if no custom mask is needed.
490   virtual const uint32_t *
491   getCustomEHPadPreservedMask(const MachineFunction &MF) const {
492     return nullptr;
493   }
494 
495   /// Return a register mask that clobbers everything.
496   virtual const uint32_t *getNoPreservedMask() const {
497     llvm_unreachable("target does not provide no preserved mask");
498   }
499 
500   /// Return a list of all of the registers which are clobbered "inside" a call
501   /// to the given function. For example, these might be needed for PLT
  /// sequences or long-branch veneers.
503   virtual ArrayRef<MCPhysReg>
504   getIntraCallClobberedRegs(const MachineFunction *MF) const {
505     return {};
506   }
507 
508   /// Return true if all bits that are set in mask \p mask0 are also set in
509   /// \p mask1.
510   bool regmaskSubsetEqual(const uint32_t *mask0, const uint32_t *mask1) const;
511 
512   /// Return all the call-preserved register masks defined for this target.
513   virtual ArrayRef<const uint32_t *> getRegMasks() const = 0;
514   virtual ArrayRef<const char *> getRegMaskNames() const = 0;
515 
516   /// Returns a bitset indexed by physical register number indicating if a
517   /// register is a special register that has particular uses and should be
518   /// considered unavailable at all times, e.g. stack pointer, return address.
519   /// A reserved register:
520   /// - is not allocatable
521   /// - is considered always live
522   /// - is ignored by liveness tracking
523   /// It is often necessary to reserve the super registers of a reserved
524   /// register as well, to avoid them getting allocated indirectly. You may use
525   /// markSuperRegs() and checkAllSuperRegsMarked() in this case.
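  ///
  /// A typical override is structured roughly as follows (illustrative sketch;
  /// the stack pointer name MyTarget::SP is hypothetical):
  /// \code
  ///   BitVector Reserved(getNumRegs());
  ///   markSuperRegs(Reserved, MyTarget::SP);
  ///   assert(checkAllSuperRegsMarked(Reserved));
  ///   return Reserved;
  /// \endcode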
526   virtual BitVector getReservedRegs(const MachineFunction &MF) const = 0;
527 
528   /// Returns either a string explaining why the given register is reserved for
529   /// this function, or an empty optional if no explanation has been written.
530   /// The absence of an explanation does not mean that the register is not
531   /// reserved (meaning, you should check that PhysReg is in fact reserved
532   /// before calling this).
533   virtual std::optional<std::string>
534   explainReservedReg(const MachineFunction &MF, MCRegister PhysReg) const {
535     return {};
536   }
537 
538   /// Returns false if we can't guarantee that Physreg, specified as an IR asm
539   /// clobber constraint, will be preserved across the statement.
540   virtual bool isAsmClobberable(const MachineFunction &MF,
541                                 MCRegister PhysReg) const {
542     return true;
543   }
544 
545   /// Returns true if PhysReg cannot be written to in inline asm statements.
546   virtual bool isInlineAsmReadOnlyReg(const MachineFunction &MF,
547                                       unsigned PhysReg) const {
548     return false;
549   }
550 
551   /// Returns true if PhysReg is unallocatable and constant throughout the
552   /// function.  Used by MachineRegisterInfo::isConstantPhysReg().
553   virtual bool isConstantPhysReg(MCRegister PhysReg) const { return false; }
554 
555   /// Returns true if the register class is considered divergent.
556   virtual bool isDivergentRegClass(const TargetRegisterClass *RC) const {
557     return false;
558   }
559 
560   /// Returns true if the register is considered uniform.
561   virtual bool isUniformReg(const MachineRegisterInfo &MRI,
562                             const RegisterBankInfo &RBI, Register Reg) const {
563     return false;
564   }
565 
566   /// Physical registers that may be modified within a function but are
567   /// guaranteed to be restored before any uses. This is useful for targets that
568   /// have call sequences where a GOT register may be updated by the caller
569   /// prior to a call and is guaranteed to be restored (also by the caller)
570   /// after the call.
571   virtual bool isCallerPreservedPhysReg(MCRegister PhysReg,
572                                         const MachineFunction &MF) const {
573     return false;
574   }
575 
576   /// This is a wrapper around getCallPreservedMask().
577   /// Return true if the register is preserved after the call.
578   virtual bool isCalleeSavedPhysReg(MCRegister PhysReg,
579                                     const MachineFunction &MF) const;
580 
581   /// Returns true if PhysReg can be used as an argument to a function.
582   virtual bool isArgumentRegister(const MachineFunction &MF,
583                                   MCRegister PhysReg) const {
584     return false;
585   }
586 
587   /// Returns true if PhysReg is a fixed register.
588   virtual bool isFixedRegister(const MachineFunction &MF,
589                                MCRegister PhysReg) const {
590     return false;
591   }
592 
593   /// Returns true if PhysReg is a general purpose register.
594   virtual bool isGeneralPurposeRegister(const MachineFunction &MF,
595                                         MCRegister PhysReg) const {
596     return false;
597   }
598 
599   /// Prior to adding the live-out mask to a stackmap or patchpoint
600   /// instruction, provide the target the opportunity to adjust it (mainly to
601   /// remove pseudo-registers that should be ignored).
602   virtual void adjustStackMapLiveOutMask(uint32_t *Mask) const {}
603 
  /// Return a super-register of the specified register
  /// Reg whose sub-register of index SubIdx is Reg.
606   MCRegister getMatchingSuperReg(MCRegister Reg, unsigned SubIdx,
607                                  const TargetRegisterClass *RC) const {
608     return MCRegisterInfo::getMatchingSuperReg(Reg, SubIdx, RC->MC);
609   }
610 
611   /// Return a subclass of the specified register
612   /// class A so that each register in it has a sub-register of the
613   /// specified sub-register index which is in the specified register class B.
614   ///
615   /// TableGen will synthesize missing A sub-classes.
616   virtual const TargetRegisterClass *
617   getMatchingSuperRegClass(const TargetRegisterClass *A,
618                            const TargetRegisterClass *B, unsigned Idx) const;
619 
620   // For a copy-like instruction that defines a register of class DefRC with
621   // subreg index DefSubReg, reading from another source with class SrcRC and
  // subregister SrcSubReg, return true if this is a preferable copy
623   // instruction or an earlier use should be used.
624   virtual bool shouldRewriteCopySrc(const TargetRegisterClass *DefRC,
625                                     unsigned DefSubReg,
626                                     const TargetRegisterClass *SrcRC,
627                                     unsigned SrcSubReg) const;
628 
629   /// Returns the largest legal sub-class of RC that
630   /// supports the sub-register index Idx.
631   /// If no such sub-class exists, return NULL.
632   /// If all registers in RC already have an Idx sub-register, return RC.
633   ///
634   /// TableGen generates a version of this function that is good enough in most
635   /// cases.  Targets can override if they have constraints that TableGen
636   /// doesn't understand.  For example, the x86 sub_8bit sub-register index is
637   /// supported by the full GR32 register class in 64-bit mode, but only by the
  /// GR32_ABCD register class in 32-bit mode.
639   ///
640   /// TableGen will synthesize missing RC sub-classes.
641   virtual const TargetRegisterClass *
642   getSubClassWithSubReg(const TargetRegisterClass *RC, unsigned Idx) const {
643     assert(Idx == 0 && "Target has no sub-registers");
644     return RC;
645   }
646 
647   /// Return a register class that can be used for a subregister copy from/into
648   /// \p SuperRC at \p SubRegIdx.
649   virtual const TargetRegisterClass *
650   getSubRegisterClass(const TargetRegisterClass *SuperRC,
651                       unsigned SubRegIdx) const {
652     return nullptr;
653   }
654 
655   /// Return the subregister index you get from composing
656   /// two subregister indices.
657   ///
658   /// The special null sub-register index composes as the identity.
659   ///
660   /// If R:a:b is the same register as R:c, then composeSubRegIndices(a, b)
  /// returns c. Note that composeSubRegIndices does not check for illegal
  /// compositions: if R does not have a subreg a, or R:a does not have a
  /// subreg b, the result is not meaningful.
664   ///
665   /// The ARM register Q0 has two D subregs dsub_0:D0 and dsub_1:D1. It also has
666   /// ssub_0:S0 - ssub_3:S3 subregs.
667   /// If you compose subreg indices dsub_1, ssub_0 you get ssub_2.
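  ///
  /// In code (sketch; the sub-register index names come from the generated
  /// ARM register info):
  /// \code
  ///   unsigned Idx = TRI->composeSubRegIndices(ARM::dsub_1, ARM::ssub_0);
  ///   // Idx == ARM::ssub_2
  /// \endcode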
668   unsigned composeSubRegIndices(unsigned a, unsigned b) const {
669     if (!a) return b;
670     if (!b) return a;
671     return composeSubRegIndicesImpl(a, b);
672   }
673 
  /// Transforms a LaneMask computed for one subregister to the lanemask that
  /// would have been computed when composing the sub-subregisters with IdxA
  /// first. \sa composeSubRegIndices()
677   LaneBitmask composeSubRegIndexLaneMask(unsigned IdxA,
678                                          LaneBitmask Mask) const {
679     if (!IdxA)
680       return Mask;
681     return composeSubRegIndexLaneMaskImpl(IdxA, Mask);
682   }
683 
684   /// Transform a lanemask given for a virtual register to the corresponding
685   /// lanemask before using subregister with index \p IdxA.
  /// This is the reverse of composeSubRegIndexLaneMask(); assuming Mask is a
  /// valid lane mask (no invalid bits set), the following holds:
688   /// X0 = composeSubRegIndexLaneMask(Idx, Mask)
689   /// X1 = reverseComposeSubRegIndexLaneMask(Idx, X0)
690   /// => X1 == Mask
691   LaneBitmask reverseComposeSubRegIndexLaneMask(unsigned IdxA,
692                                                 LaneBitmask LaneMask) const {
693     if (!IdxA)
694       return LaneMask;
695     return reverseComposeSubRegIndexLaneMaskImpl(IdxA, LaneMask);
696   }
697 
698   /// Debugging helper: dump register in human readable form to dbgs() stream.
699   static void dumpReg(Register Reg, unsigned SubRegIndex = 0,
700                       const TargetRegisterInfo *TRI = nullptr);
701 
702   /// Return target defined base register class for a physical register.
703   /// This is the register class with the lowest BaseClassOrder containing the
704   /// register.
705   /// Will be nullptr if the register is not in any base register class.
706   virtual const TargetRegisterClass *getPhysRegBaseClass(MCRegister Reg) const {
707     return nullptr;
708   }
709 
710 protected:
711   /// Overridden by TableGen in targets that have sub-registers.
712   virtual unsigned composeSubRegIndicesImpl(unsigned, unsigned) const {
713     llvm_unreachable("Target has no sub-registers");
714   }
715 
716   /// Overridden by TableGen in targets that have sub-registers.
717   virtual LaneBitmask
718   composeSubRegIndexLaneMaskImpl(unsigned, LaneBitmask) const {
719     llvm_unreachable("Target has no sub-registers");
720   }
721 
722   virtual LaneBitmask reverseComposeSubRegIndexLaneMaskImpl(unsigned,
723                                                             LaneBitmask) const {
724     llvm_unreachable("Target has no sub-registers");
725   }
726 
727   /// Return the register cost table index. This implementation is sufficient
  /// for most architectures and can be overridden by targets in case there are
729   /// multiple cost values associated with each register.
730   virtual unsigned getRegisterCostTableIndex(const MachineFunction &MF) const {
731     return 0;
732   }
733 
734 public:
735   /// Find a common super-register class if it exists.
736   ///
737   /// Find a register class, SuperRC and two sub-register indices, PreA and
738   /// PreB, such that:
739   ///
740   ///   1. PreA + SubA == PreB + SubB  (using composeSubRegIndices()), and
741   ///
742   ///   2. For all Reg in SuperRC: Reg:PreA in RCA and Reg:PreB in RCB, and
743   ///
744   ///   3. SuperRC->getSize() >= max(RCA->getSize(), RCB->getSize()).
745   ///
746   /// SuperRC will be chosen such that no super-class of SuperRC satisfies the
747   /// requirements, and there is no register class with a smaller spill size
748   /// that satisfies the requirements.
749   ///
750   /// SubA and SubB must not be 0. Use getMatchingSuperRegClass() instead.
751   ///
752   /// Either of the PreA and PreB sub-register indices may be returned as 0. In
753   /// that case, the returned register class will be a sub-class of the
754   /// corresponding argument register class.
755   ///
756   /// The function returns NULL if no register class can be found.
757   const TargetRegisterClass*
758   getCommonSuperRegClass(const TargetRegisterClass *RCA, unsigned SubA,
759                          const TargetRegisterClass *RCB, unsigned SubB,
760                          unsigned &PreA, unsigned &PreB) const;
761 
762   //===--------------------------------------------------------------------===//
763   // Register Class Information
764   //
765 protected:
766   const RegClassInfo &getRegClassInfo(const TargetRegisterClass &RC) const {
767     return RCInfos[getNumRegClasses() * HwMode + RC.getID()];
768   }
769 
770 public:
771   /// Register class iterators
772   regclass_iterator regclass_begin() const { return RegClassBegin; }
773   regclass_iterator regclass_end() const { return RegClassEnd; }
774   iterator_range<regclass_iterator> regclasses() const {
775     return make_range(regclass_begin(), regclass_end());
776   }
777 
778   unsigned getNumRegClasses() const {
779     return (unsigned)(regclass_end()-regclass_begin());
780   }
781 
782   /// Returns the register class associated with the enumeration value.
783   /// See class MCOperandInfo.
784   const TargetRegisterClass *getRegClass(unsigned i) const {
785     assert(i < getNumRegClasses() && "Register Class ID out of range");
786     return RegClassBegin[i];
787   }
788 
789   /// Returns the name of the register class.
790   const char *getRegClassName(const TargetRegisterClass *Class) const {
791     return MCRegisterInfo::getRegClassName(Class->MC);
792   }
793 
794   /// Find the largest common subclass of A and B.
795   /// Return NULL if there is no common subclass.
796   const TargetRegisterClass *
797   getCommonSubClass(const TargetRegisterClass *A,
798                     const TargetRegisterClass *B) const;
799 
800   /// Returns a TargetRegisterClass used for pointer values.
801   /// If a target supports multiple different pointer register classes,
802   /// kind specifies which one is indicated.
803   virtual const TargetRegisterClass *
804   getPointerRegClass(const MachineFunction &MF, unsigned Kind=0) const {
805     llvm_unreachable("Target didn't implement getPointerRegClass!");
806   }
807 
808   /// Returns a legal register class to copy a register in the specified class
809   /// to or from. If it is possible to copy the register directly without using
810   /// a cross register class copy, return the specified RC. Returns NULL if it
811   /// is not possible to copy between two registers of the specified class.
812   virtual const TargetRegisterClass *
813   getCrossCopyRegClass(const TargetRegisterClass *RC) const {
814     return RC;
815   }
816 
817   /// Returns the largest super class of RC that is legal to use in the current
818   /// sub-target and has the same spill size.
819   /// The returned register class can be used to create virtual registers which
820   /// means that all its registers can be copied and spilled.
821   virtual const TargetRegisterClass *
822   getLargestLegalSuperClass(const TargetRegisterClass *RC,
823                             const MachineFunction &) const {
824     /// The default implementation is very conservative and doesn't allow the
825     /// register allocator to inflate register classes.
826     return RC;
827   }
828 
829   /// Return the register pressure "high water mark" for the specific register
830   /// class. The scheduler is in high register pressure mode (for the specific
831   /// register class) if it goes over the limit.
832   ///
833   /// Note: this is the old register pressure model that relies on a manually
834   /// specified representative register class per value type.
835   virtual unsigned getRegPressureLimit(const TargetRegisterClass *RC,
836                                        MachineFunction &MF) const {
837     return 0;
838   }
839 
840   /// Return a heuristic for the machine scheduler to compare the profitability
841   /// of increasing one register pressure set versus another.  The scheduler
842   /// will prefer increasing the register pressure of the set which returns
843   /// the largest value for this function.
844   virtual unsigned getRegPressureSetScore(const MachineFunction &MF,
845                                           unsigned PSetID) const {
846     return PSetID;
847   }
848 
849   /// Get the weight in units of pressure for this register class.
850   virtual const RegClassWeight &getRegClassWeight(
851     const TargetRegisterClass *RC) const = 0;
852 
853   /// Returns size in bits of a phys/virtual/generic register.
854   unsigned getRegSizeInBits(Register Reg, const MachineRegisterInfo &MRI) const;
855 
856   /// Get the weight in units of pressure for this register unit.
857   virtual unsigned getRegUnitWeight(unsigned RegUnit) const = 0;
858 
859   /// Get the number of dimensions of register pressure.
860   virtual unsigned getNumRegPressureSets() const = 0;
861 
862   /// Get the name of this register unit pressure set.
863   virtual const char *getRegPressureSetName(unsigned Idx) const = 0;
864 
865   /// Get the register unit pressure limit for this dimension.
866   /// This limit must be adjusted dynamically for reserved registers.
867   virtual unsigned getRegPressureSetLimit(const MachineFunction &MF,
868                                           unsigned Idx) const = 0;
869 
870   /// Get the dimensions of register pressure impacted by this register class.
871   /// Returns a -1 terminated array of pressure set IDs.
872   virtual const int *getRegClassPressureSets(
873     const TargetRegisterClass *RC) const = 0;
874 
875   /// Get the dimensions of register pressure impacted by this register unit.
876   /// Returns a -1 terminated array of pressure set IDs.
877   virtual const int *getRegUnitPressureSets(unsigned RegUnit) const = 0;
878 
879   /// Get a list of 'hint' registers that the register allocator should try
880   /// first when allocating a physical register for the virtual register
881   /// VirtReg. These registers are effectively moved to the front of the
882   /// allocation order. If true is returned, regalloc will try to only use
883   /// hints to the greatest extent possible even if it means spilling.
884   ///
885   /// The Order argument is the allocation order for VirtReg's register class
886   /// as returned from RegisterClassInfo::getOrder(). The hint registers must
887   /// come from Order, and they must not be reserved.
888   ///
889   /// The default implementation of this function will only add target
890   /// independent register allocation hints. Targets that override this
891   /// function should typically call this default implementation as well and
892   /// expect to see generic copy hints added.
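  ///
  /// An override commonly delegates to the base class first (a sketch;
  /// MyRegisterInfo is a hypothetical target implementation):
  /// \code
  ///   bool MyRegisterInfo::getRegAllocationHints(
  ///       Register VirtReg, ArrayRef<MCPhysReg> Order,
  ///       SmallVectorImpl<MCPhysReg> &Hints, const MachineFunction &MF,
  ///       const VirtRegMap *VRM, const LiveRegMatrix *Matrix) const {
  ///     bool BaseResult = TargetRegisterInfo::getRegAllocationHints(
  ///         VirtReg, Order, Hints, MF, VRM, Matrix);
  ///     // Append target-specific hint registers to Hints here.
  ///     return BaseResult;
  ///   }
  /// \endcode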
893   virtual bool
894   getRegAllocationHints(Register VirtReg, ArrayRef<MCPhysReg> Order,
895                         SmallVectorImpl<MCPhysReg> &Hints,
896                         const MachineFunction &MF,
897                         const VirtRegMap *VRM = nullptr,
898                         const LiveRegMatrix *Matrix = nullptr) const;
899 
  /// A callback to allow the target a chance to update register allocation
  /// hints when a register is "changed" (e.g. coalesced) to another register.
  /// E.g., on ARM some virtual registers should target register pairs: if one
  /// of the pair is coalesced to another register, the allocation hint of the
  /// other half of the pair should be changed to point to the new register.
905   virtual void updateRegAllocHint(Register Reg, Register NewReg,
906                                   MachineFunction &MF) const {
907     // Do nothing.
908   }
909 
910   /// Allow the target to reverse allocation order of local live ranges. This
911   /// will generally allocate shorter local live ranges first. For targets with
912   /// many registers, this could reduce regalloc compile time by a large
913   /// factor. It is disabled by default for three reasons:
914   /// (1) Top-down allocation is simpler and easier to debug for targets that
915   /// don't benefit from reversing the order.
  /// (2) Bottom-up allocation could result in poor eviction decisions on some
917   /// targets affecting the performance of compiled code.
918   /// (3) Bottom-up allocation is no longer guaranteed to optimally color.
919   virtual bool reverseLocalAssignment() const { return false; }
920 
921   /// Allow the target to override the cost of using a callee-saved register for
922   /// the first time. Default value of 0 means we will use a callee-saved
923   /// register if it is available.
924   virtual unsigned getCSRFirstUseCost() const { return 0; }
925 
926   /// Returns true if the target requires (and can make use of) the register
927   /// scavenger.
928   virtual bool requiresRegisterScavenging(const MachineFunction &MF) const {
929     return false;
930   }
931 
932   /// Returns true if the target wants to use frame pointer based accesses to
933   /// spill to the scavenger emergency spill slot.
934   virtual bool useFPForScavengingIndex(const MachineFunction &MF) const {
935     return true;
936   }
937 
938   /// Returns true if the target requires post PEI scavenging of registers for
939   /// materializing frame index constants.
940   virtual bool requiresFrameIndexScavenging(const MachineFunction &MF) const {
941     return false;
942   }
943 
944   /// Returns true if the target requires using the RegScavenger directly for
945   /// frame elimination despite using requiresFrameIndexScavenging.
946   virtual bool requiresFrameIndexReplacementScavenging(
947       const MachineFunction &MF) const {
948     return false;
949   }
950 
951   /// Returns true if the target wants the LocalStackAllocation pass to be run
952   /// and virtual base registers used for more efficient stack access.
953   virtual bool requiresVirtualBaseRegisters(const MachineFunction &MF) const {
954     return false;
955   }
956 
957   /// Return true if target has reserved a spill slot in the stack frame of
958   /// the given function for the specified register. e.g. On x86, if the frame
959   /// register is required, the first fixed stack object is reserved as its
960   /// spill slot. This tells PEI not to create a new stack frame
961   /// object for the given register. It should be called only after
962   /// determineCalleeSaves().
963   virtual bool hasReservedSpillSlot(const MachineFunction &MF, Register Reg,
964                                     int &FrameIdx) const {
965     return false;
966   }
967 
968   /// Returns true if the live-ins should be tracked after register allocation.
969   virtual bool trackLivenessAfterRegAlloc(const MachineFunction &MF) const {
970     return true;
971   }
972 
973   /// True if the stack can be realigned for the target.
974   virtual bool canRealignStack(const MachineFunction &MF) const;
975 
976   /// True if storage within the function requires the stack pointer to be
977   /// aligned more than the normal calling convention calls for.
978   virtual bool shouldRealignStack(const MachineFunction &MF) const;
979 
980   /// True if stack realignment is required and still possible.
981   bool hasStackRealignment(const MachineFunction &MF) const {
982     return shouldRealignStack(MF) && canRealignStack(MF);
983   }
984 
985   /// Get the offset from the referenced frame index in the instruction,
986   /// if there is one.
987   virtual int64_t getFrameIndexInstrOffset(const MachineInstr *MI,
988                                            int Idx) const {
989     return 0;
990   }
991 
992   /// Returns true if the instruction's frame index reference would be better
993   /// served by a base register other than FP or SP.
994   /// Used by LocalStackFrameAllocation to determine which frame index
995   /// references it should create new base registers for.
996   virtual bool needsFrameBaseReg(MachineInstr *MI, int64_t Offset) const {
997     return false;
998   }
999 
1000   /// Insert defining instruction(s) for a pointer to FrameIdx before
1001   /// insertion point I. Return materialized frame pointer.
1002   virtual Register materializeFrameBaseRegister(MachineBasicBlock *MBB,
1003                                                 int FrameIdx,
1004                                                 int64_t Offset) const {
1005     llvm_unreachable("materializeFrameBaseRegister does not exist on this "
1006                      "target");
1007   }
1008 
1009   /// Resolve a frame index operand of an instruction
1010   /// to reference the indicated base register plus offset instead.
1011   virtual void resolveFrameIndex(MachineInstr &MI, Register BaseReg,
1012                                  int64_t Offset) const {
1013     llvm_unreachable("resolveFrameIndex does not exist on this target");
1014   }
1015 
1016   /// Determine whether a given base register plus offset immediate is
1017   /// encodable to resolve a frame index.
1018   virtual bool isFrameOffsetLegal(const MachineInstr *MI, Register BaseReg,
1019                                   int64_t Offset) const {
1020     llvm_unreachable("isFrameOffsetLegal does not exist on this target");
1021   }
1022 
1023   /// Gets the DWARF expression opcodes for \p Offset.
1024   virtual void getOffsetOpcodes(const StackOffset &Offset,
1025                                 SmallVectorImpl<uint64_t> &Ops) const;
1026 
1027   /// Prepends a DWARF expression for \p Offset to DIExpression \p Expr.
1028   DIExpression *
1029   prependOffsetExpression(const DIExpression *Expr, unsigned PrependFlags,
1030                           const StackOffset &Offset) const;
1031 
1032   /// Spill the register so it can be used by the register scavenger.
1033   /// Return true if the register was spilled, false otherwise.
1034   /// If this function does not spill the register, the scavenger
1035   /// will instead spill it to the emergency spill slot.
1036   virtual bool saveScavengerRegister(MachineBasicBlock &MBB,
1037                                      MachineBasicBlock::iterator I,
1038                                      MachineBasicBlock::iterator &UseMI,
1039                                      const TargetRegisterClass *RC,
1040                                      Register Reg) const {
1041     return false;
1042   }
1043 
1044   /// Process frame indices in reverse block order. This changes the behavior of
  /// the RegScavenger passed to eliminateFrameIndex. If this is true, targets
  /// should use scavengeRegisterBackwards in eliminateFrameIndex. New targets
1047   /// should prefer reverse scavenging behavior.
1048   virtual bool supportsBackwardScavenger() const { return false; }
1049 
  /// This method must be overridden to eliminate abstract frame indices from
1051   /// instructions which may use them. The instruction referenced by the
1052   /// iterator contains an MO_FrameIndex operand which must be eliminated by
1053   /// this method. This method may modify or replace the specified instruction,
1054   /// as long as it keeps the iterator pointing at the finished product.
1055   /// SPAdj is the SP adjustment due to call frame setup instruction.
1056   /// FIOperandNum is the FI operand number.
  /// Returns true if the current instruction was removed and the iterator
  /// is no longer valid.
1059   virtual bool eliminateFrameIndex(MachineBasicBlock::iterator MI,
1060                                    int SPAdj, unsigned FIOperandNum,
1061                                    RegScavenger *RS = nullptr) const = 0;
1062 
1063   /// Return the assembly name for \p Reg.
1064   virtual StringRef getRegAsmName(MCRegister Reg) const {
1065     // FIXME: We are assuming that the assembly name is equal to the TableGen
1066     // name converted to lower case
1067     //
1068     // The TableGen name is the name of the definition for this register in the
1069     // target's tablegen files.  For example, the TableGen name of
1070     // def EAX : Register <...>; is "EAX"
1071     return StringRef(getName(Reg));
1072   }
1073 
1074   //===--------------------------------------------------------------------===//
1075   /// Subtarget Hooks
1076 
1077   /// SrcRC and DstRC will be morphed into NewRC if this returns true.
1078   virtual bool shouldCoalesce(MachineInstr *MI,
1079                               const TargetRegisterClass *SrcRC,
1080                               unsigned SubReg,
1081                               const TargetRegisterClass *DstRC,
1082                               unsigned DstSubReg,
1083                               const TargetRegisterClass *NewRC,
                              LiveIntervals &LIS) const {
    return true;
  }
1086 
  /// Region split has a high compile time cost especially for large live ranges.
1088   /// This method is used to decide whether or not \p VirtReg should
1089   /// go through this expensive splitting heuristic.
1090   virtual bool shouldRegionSplitForVirtReg(const MachineFunction &MF,
1091                                            const LiveInterval &VirtReg) const;
1092 
1093   /// Last chance recoloring has a high compile time cost especially for
1094   /// targets with a lot of registers.
1095   /// This method is used to decide whether or not \p VirtReg should
1096   /// go through this expensive heuristic.
1097   /// When this target hook is hit, by returning false, there is a high
1098   /// chance that the register allocation will fail altogether (usually with
1099   /// "ran out of registers").
1100   /// That said, this error usually points to another problem in the
1101   /// optimization pipeline.
1102   virtual bool
1103   shouldUseLastChanceRecoloringForVirtReg(const MachineFunction &MF,
1104                                           const LiveInterval &VirtReg) const {
1105     return true;
1106   }
1107 
1108   /// Deferred spilling delays the spill insertion of a virtual register
1109   /// after every other allocation. By deferring the spilling, it is
1110   /// sometimes possible to eliminate that spilling altogether because
1111   /// something else could have been eliminated, thus leaving some space
1112   /// for the virtual register.
1113   /// However, this comes with a compile time impact because it adds one
1114   /// more stage to the greedy register allocator.
1115   /// This method is used to decide whether \p VirtReg should use the deferred
1116   /// spilling stage instead of being spilled right away.
1117   virtual bool
1118   shouldUseDeferredSpillingForVirtReg(const MachineFunction &MF,
1119                                       const LiveInterval &VirtReg) const {
1120     return false;
1121   }
1122 
1123   /// When prioritizing live ranges in register allocation, if this hook returns
1124   /// true then the AllocationPriority of the register class will be treated as
1125   /// more important than whether the range is local to a basic block or global.
1126   virtual bool
1127   regClassPriorityTrumpsGlobalness(const MachineFunction &MF) const {
1128     return false;
1129   }
1130 
1131   //===--------------------------------------------------------------------===//
1132   /// Debug information queries.
1133 
1134   /// getFrameRegister - This method should return the register used as a base
1135   /// for values allocated in the current stack frame.
1136   virtual Register getFrameRegister(const MachineFunction &MF) const = 0;
1137 
1138   /// Mark a register and all its aliases as reserved in the given set.
1139   void markSuperRegs(BitVector &RegisterSet, MCRegister Reg) const;
1140 
1141   /// Returns true if for every register in the set all super registers are part
1142   /// of the set as well.
1143   bool checkAllSuperRegsMarked(const BitVector &RegisterSet,
1144       ArrayRef<MCPhysReg> Exceptions = ArrayRef<MCPhysReg>()) const;
1145 
1146   virtual const TargetRegisterClass *
1147   getConstrainedRegClassForOperand(const MachineOperand &MO,
1148                                    const MachineRegisterInfo &MRI) const {
1149     return nullptr;
1150   }
1151 
  /// Returns the physical register number of sub-register \p Idx
  /// for physical register \p Reg. Return zero if the sub-register does not
  /// exist.
1155   inline MCRegister getSubReg(MCRegister Reg, unsigned Idx) const {
1156     return static_cast<const MCRegisterInfo *>(this)->getSubReg(Reg, Idx);
1157   }
1158 
1159   /// Some targets have non-allocatable registers that aren't technically part
1160   /// of the explicit callee saved register list, but should be handled as such
1161   /// in certain cases.
1162   virtual bool isNonallocatableRegisterCalleeSave(MCRegister Reg) const {
1163     return false;
1164   }
1165 };
1166 
1167 //===----------------------------------------------------------------------===//
1168 //                           SuperRegClassIterator
1169 //===----------------------------------------------------------------------===//
1170 //
1171 // Iterate over the possible super-registers for a given register class. The
1172 // iterator will visit a list of pairs (Idx, Mask) corresponding to the
1173 // possible classes of super-registers.
1174 //
1175 // Each bit mask will have at least one set bit, and each set bit in Mask
1176 // corresponds to a SuperRC such that:
1177 //
1178 //   For all Reg in SuperRC: Reg:Idx is in RC.
1179 //
// The iterator can include (0, RC->getSubClassMask()) as the first entry which
1181 // also satisfies the above requirement, assuming Reg:0 == Reg.
1182 //
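// Typical usage (a sketch; RC and TRI are assumed pointers to the register
// class and the TargetRegisterInfo):
//
//   for (SuperRegClassIterator SRI(RC, TRI, /*IncludeSelf=*/true);
//        SRI.isValid(); ++SRI) {
//     unsigned SubIdx = SRI.getSubReg();
//     const uint32_t *Mask = SRI.getMask();
//     // Each set bit in Mask identifies a class of super-registers such that
//     // Reg:SubIdx is in RC for every Reg in that class.
//   }
//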
1183 class SuperRegClassIterator {
1184   const unsigned RCMaskWords;
1185   unsigned SubReg = 0;
1186   const uint16_t *Idx;
1187   const uint32_t *Mask;
1188 
1189 public:
1190   /// Create a SuperRegClassIterator that visits all the super-register classes
1191   /// of RC. When IncludeSelf is set, also include the (0, sub-classes) entry.
1192   SuperRegClassIterator(const TargetRegisterClass *RC,
1193                         const TargetRegisterInfo *TRI,
1194                         bool IncludeSelf = false)
1195     : RCMaskWords((TRI->getNumRegClasses() + 31) / 32),
1196       Idx(RC->getSuperRegIndices()), Mask(RC->getSubClassMask()) {
1197     if (!IncludeSelf)
1198       ++*this;
1199   }
1200 
1201   /// Returns true if this iterator is still pointing at a valid entry.
1202   bool isValid() const { return Idx; }
1203 
1204   /// Returns the current sub-register index.
1205   unsigned getSubReg() const { return SubReg; }
1206 
1207   /// Returns the bit mask of register classes that getSubReg() projects into
1208   /// RC.
1209   /// See TargetRegisterClass::getSubClassMask() for how to use it.
1210   const uint32_t *getMask() const { return Mask; }
1211 
1212   /// Advance iterator to the next entry.
1213   void operator++() {
1214     assert(isValid() && "Cannot move iterator past end.");
1215     Mask += RCMaskWords;
1216     SubReg = *Idx++;
1217     if (!SubReg)
1218       Idx = nullptr;
1219   }
1220 };
1221 
1222 //===----------------------------------------------------------------------===//
1223 //                           BitMaskClassIterator
1224 //===----------------------------------------------------------------------===//
/// This class encapsulates the logic to iterate over the bitmasks returned by
/// the various RegClass related APIs.
1227 /// E.g., this class can be used to iterate over the subclasses provided by
1228 /// TargetRegisterClass::getSubClassMask or SuperRegClassIterator::getMask.
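///
/// Typical usage (a sketch; Mask and TRI are assumed to be the bitmask and a
/// TargetRegisterInfo pointer):
/// \code
///   for (BitMaskClassIterator It(Mask, *TRI); It.isValid(); ++It) {
///     const TargetRegisterClass *RC = TRI->getRegClass(It.getID());
///     // Use RC here.
///   }
/// \endcode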
1229 class BitMaskClassIterator {
1230   /// Total number of register classes.
1231   const unsigned NumRegClasses;
1232   /// Base index of CurrentChunk.
  /// In other words, the number of bits we read to get at the
  /// beginning of that chunk.
1235   unsigned Base = 0;
  /// Adjusted base index of CurrentChunk.
  /// Base index + how many bits we read within CurrentChunk.
1238   unsigned Idx = 0;
1239   /// Current register class ID.
1240   unsigned ID = 0;
1241   /// Mask we are iterating over.
1242   const uint32_t *Mask;
1243   /// Current chunk of the Mask we are traversing.
1244   uint32_t CurrentChunk;
1245 
1246   /// Move ID to the next set bit.
1247   void moveToNextID() {
1248     // If the current chunk of memory is empty, move to the next one,
    // while making sure we do not go past the number of register
1250     // classes.
1251     while (!CurrentChunk) {
1252       // Move to the next chunk.
1253       Base += 32;
1254       if (Base >= NumRegClasses) {
1255         ID = NumRegClasses;
1256         return;
1257       }
1258       CurrentChunk = *++Mask;
1259       Idx = Base;
1260     }
1261     // Otherwise look for the first bit set from the right
1262     // (representation of the class ID is big endian).
1263     // See getSubClassMask for more details on the representation.
1264     unsigned Offset = llvm::countr_zero(CurrentChunk);
1265     // Add the Offset to the adjusted base number of this chunk: Idx.
1266     // This is the ID of the register class.
1267     ID = Idx + Offset;
1268 
1269     // Consume the zeros, if any, and the bit we just read
1270     // so that we are at the right spot for the next call.
    // Do not do Offset + 1 because Offset may be 31 and shifting by 32
    // would be UB. In that case we could have made the chunk equal to 0,
    // but that would have introduced an if statement.
1275     moveNBits(Offset);
1276     moveNBits(1);
1277   }
1278 
1279   /// Move \p NumBits Bits forward in CurrentChunk.
1280   void moveNBits(unsigned NumBits) {
1281     assert(NumBits < 32 && "Undefined behavior spotted!");
1282     // Consume the bit we read for the next call.
1283     CurrentChunk >>= NumBits;
1284     // Adjust the base for the chunk.
1285     Idx += NumBits;
1286   }
1287 
1288 public:
1289   /// Create a BitMaskClassIterator that visits all the register classes
1290   /// represented by \p Mask.
1291   ///
1292   /// \pre \p Mask != nullptr
1293   BitMaskClassIterator(const uint32_t *Mask, const TargetRegisterInfo &TRI)
1294       : NumRegClasses(TRI.getNumRegClasses()), Mask(Mask), CurrentChunk(*Mask) {
1295     // Move to the first ID.
1296     moveToNextID();
1297   }
1298 
1299   /// Returns true if this iterator is still pointing at a valid entry.
1300   bool isValid() const { return getID() != NumRegClasses; }
1301 
1302   /// Returns the current register class ID.
1303   unsigned getID() const { return ID; }
1304 
1305   /// Advance iterator to the next entry.
1306   void operator++() {
1307     assert(isValid() && "Cannot move iterator past end.");
1308     moveToNextID();
1309   }
1310 };
1311 
1312 // This is useful when building IndexedMaps keyed on virtual registers
1313 struct VirtReg2IndexFunctor {
1314   using argument_type = Register;
1315   unsigned operator()(Register Reg) const {
1316     return Register::virtReg2Index(Reg);
1317   }
1318 };
1319 
1320 /// Prints virtual and physical registers with or without a TRI instance.
1321 ///
1322 /// The format is:
1323 ///   %noreg          - NoRegister
1324 ///   %5              - a virtual register.
1325 ///   %5:sub_8bit     - a virtual register with sub-register index (with TRI).
1326 ///   %eax            - a physical register
1327 ///   %physreg17      - a physical register when no TRI instance given.
1328 ///
1329 /// Usage: OS << printReg(Reg, TRI, SubRegIdx) << '\n';
1330 Printable printReg(Register Reg, const TargetRegisterInfo *TRI = nullptr,
1331                    unsigned SubIdx = 0,
1332                    const MachineRegisterInfo *MRI = nullptr);
1333 
1334 /// Create Printable object to print register units on a \ref raw_ostream.
1335 ///
1336 /// Register units are named after their root registers:
1337 ///
1338 ///   al      - Single root.
1339 ///   fp0~st7 - Dual roots.
1340 ///
1341 /// Usage: OS << printRegUnit(Unit, TRI) << '\n';
1342 Printable printRegUnit(unsigned Unit, const TargetRegisterInfo *TRI);
1343 
1344 /// Create Printable object to print virtual registers and physical
1345 /// registers on a \ref raw_ostream.
1346 Printable printVRegOrUnit(unsigned VRegOrUnit, const TargetRegisterInfo *TRI);
1347 
1348 /// Create Printable object to print register classes or register banks
1349 /// on a \ref raw_ostream.
1350 Printable printRegClassOrBank(Register Reg, const MachineRegisterInfo &RegInfo,
1351                               const TargetRegisterInfo *TRI);
1352 
1353 } // end namespace llvm
1354 
1355 #endif // LLVM_CODEGEN_TARGETREGISTERINFO_H
1356