//==- CodeGen/TargetRegisterInfo.h - Target Register Information -*- C++ -*-==//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file describes an abstract interface used to get information about a
// target machine's register file.  This information is used for a variety of
// purposes, especially register allocation.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_CODEGEN_TARGETREGISTERINFO_H
#define LLVM_CODEGEN_TARGETREGISTERINFO_H

#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/iterator_range.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/IR/CallingConv.h"
#include "llvm/MC/LaneBitmask.h"
#include "llvm/MC/MCRegisterInfo.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MachineValueType.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/Printable.h"
#include <cassert>
#include <cstdint>

namespace llvm {

class BitVector;
class DIExpression;
class LiveRegMatrix;
class MachineFunction;
class MachineInstr;
class RegScavenger;
class VirtRegMap;
class LiveIntervals;
class LiveInterval;

class TargetRegisterClass {
public:
  using iterator = const MCPhysReg *;
  using const_iterator = const MCPhysReg *;
  using sc_iterator = const TargetRegisterClass* const *;

  // Instance variables filled by tablegen, do not use!
  const MCRegisterClass *MC;
  const uint32_t *SubClassMask;
  const uint16_t *SuperRegIndices;
  const LaneBitmask LaneMask;
  /// Classes with a higher priority value are assigned first by register
  /// allocators using a greedy heuristic. The value is in the range [0,63].
  /// Values >= 32 should be used with care since they may overlap with other
  /// fields in the allocator's priority heuristics.
  const uint8_t AllocationPriority;
  /// Configurable target specific flags.
  const uint8_t TSFlags;
  /// Whether the class supports two (or more) disjunct subregister indices.
  const bool HasDisjunctSubRegs;
  /// Whether a combination of subregisters can cover every register in the
  /// class. See also the CoveredBySubRegs description in Target.td.
  const bool CoveredBySubRegs;
  const sc_iterator SuperClasses;
  ArrayRef<MCPhysReg> (*OrderFunc)(const MachineFunction&);

  /// Return the register class ID number.
  unsigned getID() const { return MC->getID(); }

  /// begin/end - Return all of the registers in this class.
  ///
  iterator       begin() const { return MC->begin(); }
  iterator         end() const { return MC->end(); }

  /// Return the number of registers in this class.
  unsigned getNumRegs() const { return MC->getNumRegs(); }

  iterator_range<SmallVectorImpl<MCPhysReg>::const_iterator>
  getRegisters() const {
    return make_range(MC->begin(), MC->end());
  }

  /// Return the specified register in the class.
  MCRegister getRegister(unsigned i) const {
    return MC->getRegister(i);
  }

  /// Return true if the specified register is included in this register class.
  /// This does not include virtual registers.
  bool contains(Register Reg) const {
    /// FIXME: Historically this function has returned false when given vregs
    ///        but it should probably only receive physical registers
    if (!Reg.isPhysical())
      return false;
    return MC->contains(Reg.asMCReg());
  }

  /// Return true if both registers are in this class.
  bool contains(Register Reg1, Register Reg2) const {
    /// FIXME: Historically this function has returned false when given vregs
    ///        but it should probably only receive physical registers
    if (!Reg1.isPhysical() || !Reg2.isPhysical())
      return false;
    return MC->contains(Reg1.asMCReg(), Reg2.asMCReg());
  }

  /// Return the cost of copying a value between two registers in this class.
  /// A negative number means the register class is very expensive
  /// to copy e.g. status flag register classes.
  int getCopyCost() const { return MC->getCopyCost(); }

  /// Return true if this register class may be used to create virtual
  /// registers.
  bool isAllocatable() const { return MC->isAllocatable(); }

  /// Return true if the specified TargetRegisterClass
  /// is a proper sub-class of this TargetRegisterClass.
  bool hasSubClass(const TargetRegisterClass *RC) const {
    return RC != this && hasSubClassEq(RC);
  }

  /// Returns true if RC is a sub-class of or equal to this class.
  bool hasSubClassEq(const TargetRegisterClass *RC) const {
    unsigned ID = RC->getID();
    return (SubClassMask[ID / 32] >> (ID % 32)) & 1;
  }

  /// Return true if the specified TargetRegisterClass is a
  /// proper super-class of this TargetRegisterClass.
  bool hasSuperClass(const TargetRegisterClass *RC) const {
    return RC->hasSubClass(this);
  }

  /// Returns true if RC is a super-class of or equal to this class.
  bool hasSuperClassEq(const TargetRegisterClass *RC) const {
    return RC->hasSubClassEq(this);
  }

  /// Returns a bit vector of subclasses, including this one.
  /// The vector is indexed by class IDs.
  ///
  /// To use it, consider the returned array as a chunk of memory that
  /// contains an array of bits of size NumRegClasses. Each 32-bit chunk
  /// contains a bitset of the ID of the subclasses in big-endian style.
  ///
  /// I.e., the representation of the memory from left to right at the
  /// bit level looks like:
  /// [31 30 ... 1 0] [ 63 62 ... 33 32] ...
  ///                     [ XXX NumRegClasses NumRegClasses - 1 ... ]
  /// where the numbers represent the class IDs and the XXX bits should
  /// be ignored.
  ///
  /// See the implementation of hasSubClassEq for an example of how it
  /// can be used.
  const uint32_t *getSubClassMask() const {
    return SubClassMask;
  }
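
  // Illustrative sketch (not part of the interface): decoding the mask to
  // test whether the class with ID 'OtherID' is a sub-class of 'RC', exactly
  // as hasSubClassEq() does internally.
  //
  //   const uint32_t *Mask = RC->getSubClassMask();
  //   bool IsSubClassOfRC = (Mask[OtherID / 32] >> (OtherID % 32)) & 1;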

  /// Returns a 0-terminated list of sub-register indices that project some
  /// super-register class into this register class. The list has an entry for
  /// each Idx such that:
  ///
  ///   There exists SuperRC where:
  ///     For all Reg in SuperRC:
  ///       this->contains(Reg:Idx)
  const uint16_t *getSuperRegIndices() const {
    return SuperRegIndices;
  }

  /// Returns a NULL-terminated list of super-classes.  The
  /// classes are ordered by ID which is also a topological ordering from large
  /// to small classes.  The list does NOT include the current class.
  sc_iterator getSuperClasses() const {
    return SuperClasses;
  }

  /// Return true if this TargetRegisterClass is a subset
  /// class of at least one other TargetRegisterClass.
  bool isASubClass() const {
    return SuperClasses[0] != nullptr;
  }

  /// Returns the preferred order for allocating registers from this register
  /// class in MF. The raw order comes directly from the .td file and may
  /// include reserved registers that are not allocatable.
  /// Register allocators should also make sure to allocate
  /// callee-saved registers only after all the volatiles are used. The
  /// RegisterClassInfo class provides filtered allocation orders with
  /// callee-saved registers moved to the end.
  ///
  /// The MachineFunction argument can be used to tune the allocatable
  /// registers based on the characteristics of the function, subtarget, or
  /// other criteria.
  ///
  /// By default, this method returns all registers in the class.
  ArrayRef<MCPhysReg> getRawAllocationOrder(const MachineFunction &MF) const {
    return OrderFunc ? OrderFunc(MF) : makeArrayRef(begin(), getNumRegs());
  }

  /// Returns the combination of all lane masks of the registers in this class.
  /// The lane masks of the registers are the combination of all lane masks
  /// of their subregisters. Returns 1 if there are no subregisters.
  LaneBitmask getLaneMask() const {
    return LaneMask;
  }
};

/// Extra information, not in MCRegisterDesc, about registers.
/// These are used by codegen, not by MC.
struct TargetRegisterInfoDesc {
  const uint8_t *CostPerUse; // Extra cost of instructions using register.
  unsigned NumCosts; // Number of cost values associated with each register.
  const bool
      *InAllocatableClass; // Register belongs to an allocatable regclass.
};

/// Each TargetRegisterClass has a per-register weight and a weight limit,
/// which must be less than the limits of its pressure sets.
struct RegClassWeight {
  unsigned RegWeight;
  unsigned WeightLimit;
};

/// TargetRegisterInfo base class - We assume that the target defines a static
/// array of TargetRegisterDesc objects that represent all of the machine
/// registers that the target has.  As such, we simply have to track a pointer
/// to this array so that we can turn register number into a register
/// descriptor.
///
class TargetRegisterInfo : public MCRegisterInfo {
public:
  using regclass_iterator = const TargetRegisterClass * const *;
  using vt_iterator = const MVT::SimpleValueType *;
  struct RegClassInfo {
    unsigned RegSize, SpillSize, SpillAlignment;
    vt_iterator VTList;
  };
private:
  const TargetRegisterInfoDesc *InfoDesc;     // Extra desc array for codegen
  const char *const *SubRegIndexNames;        // Names of subreg indexes.
  // Pointer to array of lane masks, one per sub-reg index.
  const LaneBitmask *SubRegIndexLaneMasks;

  regclass_iterator RegClassBegin, RegClassEnd;   // List of regclasses
  LaneBitmask CoveringLanes;
  const RegClassInfo *const RCInfos;
  unsigned HwMode;

protected:
  TargetRegisterInfo(const TargetRegisterInfoDesc *ID,
                     regclass_iterator RCB,
                     regclass_iterator RCE,
                     const char *const *SRINames,
                     const LaneBitmask *SRILaneMasks,
                     LaneBitmask CoveringLanes,
                     const RegClassInfo *const RCIs,
                     unsigned Mode = 0);
  virtual ~TargetRegisterInfo();

public:
  // Register numbers can represent physical registers, virtual registers, and
  // sometimes stack slots. The unsigned values are divided into these ranges:
  //
  //   0           Not a register, can be used as a sentinel.
  //   [1;2^30)    Physical registers assigned by TableGen.
  //   [2^30;2^31) Stack slots. (Rarely used.)
  //   [2^31;2^32) Virtual registers assigned by MachineRegisterInfo.
  //
  // Further sentinels can be allocated from the small negative integers.
  // DenseMapInfo<unsigned> uses -1u and -2u.

  /// Return the size in bits of a register from class RC.
  unsigned getRegSizeInBits(const TargetRegisterClass &RC) const {
    return getRegClassInfo(RC).RegSize;
  }

  /// Return the size in bytes of the stack slot allocated to hold a spilled
  /// copy of a register from class RC.
  unsigned getSpillSize(const TargetRegisterClass &RC) const {
    return getRegClassInfo(RC).SpillSize / 8;
  }

  /// Return the minimum required alignment in bytes for a spill slot for
  /// a register of this class.
  Align getSpillAlign(const TargetRegisterClass &RC) const {
    return Align(getRegClassInfo(RC).SpillAlignment / 8);
  }

  /// Return true if the given TargetRegisterClass has the ValueType T.
  bool isTypeLegalForClass(const TargetRegisterClass &RC, MVT T) const {
    for (auto I = legalclasstypes_begin(RC); *I != MVT::Other; ++I)
      if (MVT(*I) == T)
        return true;
    return false;
  }

  /// Return true if the given TargetRegisterClass is compatible with LLT T.
  bool isTypeLegalForClass(const TargetRegisterClass &RC, LLT T) const {
    for (auto I = legalclasstypes_begin(RC); *I != MVT::Other; ++I) {
      MVT VT(*I);
      if (VT == MVT::Untyped)
        return true;

      if (LLT(VT) == T)
        return true;
    }
    return false;
  }

  /// Loop over all of the value types that can be represented by values
  /// in the given register class.
  vt_iterator legalclasstypes_begin(const TargetRegisterClass &RC) const {
    return getRegClassInfo(RC).VTList;
  }

  vt_iterator legalclasstypes_end(const TargetRegisterClass &RC) const {
    vt_iterator I = legalclasstypes_begin(RC);
    while (*I != MVT::Other)
      ++I;
    return I;
  }
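
  // A minimal usage sketch (illustrative): iterating the legal value types of
  // a register class with the begin iterator above; the list is terminated by
  // MVT::Other, as isTypeLegalForClass() shows.
  //
  //   for (vt_iterator I = legalclasstypes_begin(RC); *I != MVT::Other; ++I) {
  //     // *I is an MVT::SimpleValueType that RC can hold.
  //   }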

  /// Returns the register class of a physical register of the given type,
  /// picking the smallest (most specific) register class of the right type
  /// that contains this physreg.
  const TargetRegisterClass *getMinimalPhysRegClass(MCRegister Reg,
                                                    MVT VT = MVT::Other) const;

  /// Returns the register class of a physical register of the given type,
  /// picking the smallest (most specific) register class of the right type
  /// that contains this physreg. If there is no register class compatible
  /// with the given type, returns nullptr.
  const TargetRegisterClass *getMinimalPhysRegClassLLT(MCRegister Reg,
                                                       LLT Ty = LLT()) const;

  /// Return the maximal subclass of the given register class that is
  /// allocatable or NULL.
  const TargetRegisterClass *
    getAllocatableClass(const TargetRegisterClass *RC) const;

  /// Returns a bitset indexed by register number indicating if a register is
  /// allocatable or not. If a register class is specified, returns the subset
  /// for the class.
  BitVector getAllocatableSet(const MachineFunction &MF,
                              const TargetRegisterClass *RC = nullptr) const;

  /// Get a list of cost values for all registers that correspond to the index
  /// returned by getRegisterCostTableIndex().
  ArrayRef<uint8_t> getRegisterCosts(const MachineFunction &MF) const {
    unsigned Idx = getRegisterCostTableIndex(MF);
    unsigned NumRegs = getNumRegs();
    assert(Idx < InfoDesc->NumCosts && "CostPerUse index out of bounds");

    return makeArrayRef(&InfoDesc->CostPerUse[Idx * NumRegs], NumRegs);
  }

  /// Return true if the register is in an allocatable register class.
  bool isInAllocatableClass(MCRegister RegNo) const {
    return InfoDesc->InAllocatableClass[RegNo];
  }

  /// Return the human-readable symbolic target-specific
  /// name for the specified SubRegIndex.
  const char *getSubRegIndexName(unsigned SubIdx) const {
    assert(SubIdx && SubIdx < getNumSubRegIndices() &&
           "This is not a subregister index");
    return SubRegIndexNames[SubIdx-1];
  }

  /// Return a bitmask representing the parts of a register that are covered by
  /// SubIdx \see LaneBitmask.
  ///
  /// SubIdx == 0 is allowed, it has the lane mask ~0u.
  LaneBitmask getSubRegIndexLaneMask(unsigned SubIdx) const {
    assert(SubIdx < getNumSubRegIndices() && "This is not a subregister index");
    return SubRegIndexLaneMasks[SubIdx];
  }

  /// Try to find one or more subregister indexes to cover \p LaneMask.
  ///
  /// If this is possible, returns true and appends the best matching set of
  /// indexes to \p Indexes. If this is not possible, returns false.
  bool getCoveringSubRegIndexes(const MachineRegisterInfo &MRI,
                                const TargetRegisterClass *RC,
                                LaneBitmask LaneMask,
                                SmallVectorImpl<unsigned> &Indexes) const;

  /// The lane masks returned by getSubRegIndexLaneMask() above can only be
  /// used to determine if sub-registers overlap - they can't be used to
  /// determine if a set of sub-registers completely cover another
  /// sub-register.
  ///
  /// The X86 general purpose registers have two lanes corresponding to the
  /// sub_8bit and sub_8bit_hi sub-registers. Both sub_32bit and sub_16bit have
  /// lane masks '3', but the sub_16bit sub-register doesn't fully cover the
  /// sub_32bit sub-register.
  ///
  /// On the other hand, the ARM NEON lanes fully cover their registers: The
  /// dsub_0 sub-register is completely covered by the ssub_0 and ssub_1 lanes.
  /// This is related to the CoveredBySubRegs property on register definitions.
  ///
  /// This function returns a bit mask of lanes that completely cover their
  /// sub-registers. More precisely, given:
  ///
  ///   Covering = getCoveringLanes();
  ///   MaskA = getSubRegIndexLaneMask(SubA);
  ///   MaskB = getSubRegIndexLaneMask(SubB);
  ///
  /// If (MaskA & ~(MaskB & Covering)) == 0, then SubA is completely covered by
  /// SubB.
  LaneBitmask getCoveringLanes() const { return CoveringLanes; }
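
  // Illustrative check following the formula above: does sub-register index
  // SubB completely cover sub-register index SubA?
  //
  //   LaneBitmask Covering = TRI->getCoveringLanes();
  //   LaneBitmask MaskA = TRI->getSubRegIndexLaneMask(SubA);
  //   LaneBitmask MaskB = TRI->getSubRegIndexLaneMask(SubB);
  //   bool BCoversA = (MaskA & ~(MaskB & Covering)).none();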

  /// Returns true if the two registers are equal or alias each other.
  /// The registers may be virtual registers.
  bool regsOverlap(Register RegA, Register RegB) const {
    if (RegA == RegB)
      return true;
    if (RegA.isPhysical() && RegB.isPhysical())
      return MCRegisterInfo::regsOverlap(RegA.asMCReg(), RegB.asMCReg());
    return false;
  }

  /// Returns true if Reg contains RegUnit.
  bool hasRegUnit(MCRegister Reg, Register RegUnit) const {
    for (MCRegUnitIterator Units(Reg, this); Units.isValid(); ++Units)
      if (Register(*Units) == RegUnit)
        return true;
    return false;
  }

  /// Returns the original SrcReg unless it is the target of a copy-like
  /// operation, in which case we chain backwards through all such operations
  /// to the ultimate source register.  If a physical register is encountered,
  /// we stop the search.
  virtual Register lookThruCopyLike(Register SrcReg,
                                    const MachineRegisterInfo *MRI) const;

  /// Find the original SrcReg unless it is the target of a copy-like operation,
  /// in which case we chain backwards through all such operations to the
  /// ultimate source register. If a physical register is encountered, we stop
  /// the search.
  /// Return the original SrcReg if all the definitions in the chain only have
  /// one user and not a physical register.
  virtual Register
  lookThruSingleUseCopyChain(Register SrcReg,
                             const MachineRegisterInfo *MRI) const;

  /// Return a null-terminated list of all of the callee-saved registers on
  /// this target. The registers should be in the order of desired callee-save
  /// stack frame offset. The first register is closest to the incoming stack
  /// pointer if stack grows down, and vice versa.
  /// Notice: This function does not take into account disabled CSRs.
  ///         In most cases you will want to use instead the function
  ///         getCalleeSavedRegs that is implemented in MachineRegisterInfo.
  virtual const MCPhysReg*
  getCalleeSavedRegs(const MachineFunction *MF) const = 0;

  /// Return a mask of call-preserved registers for the given calling convention
  /// on the current function. The mask should include all call-preserved
  /// aliases. This is used by the register allocator to determine which
  /// registers can be live across a call.
  ///
  /// The mask is an array containing (TRI::getNumRegs()+31)/32 entries.
  /// A set bit indicates that all bits of the corresponding register are
  /// preserved across the function call.  The bit mask is expected to be
  /// sub-register complete, i.e. if A is preserved, so are all its
  /// sub-registers.
  ///
  /// Bits are numbered from the LSB, so the bit for physical register Reg can
  /// be found as (Mask[Reg / 32] >> Reg % 32) & 1.
  ///
  /// A NULL pointer means that no register mask will be used, and call
  /// instructions should use implicit-def operands to indicate call clobbered
  /// registers.
  ///
  virtual const uint32_t *getCallPreservedMask(const MachineFunction &MF,
                                               CallingConv::ID) const {
    // The default mask clobbers everything.  All targets should override.
    return nullptr;
  }
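
  // Illustrative sketch of the bit layout documented above: testing whether
  // the physical register 'Reg' is preserved by a call whose register mask is
  // 'Mask' (assumed non-null here).
  //
  //   bool IsPreserved = (Mask[Reg / 32] >> (Reg % 32)) & 1;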

  /// Return a register mask for the registers preserved by the unwinder,
  /// or nullptr if no custom mask is needed.
  virtual const uint32_t *
  getCustomEHPadPreservedMask(const MachineFunction &MF) const {
    return nullptr;
  }

  /// Return a register mask that clobbers everything.
  virtual const uint32_t *getNoPreservedMask() const {
    llvm_unreachable("target does not provide no preserved mask");
  }

  /// Return a list of all of the registers which are clobbered "inside" a call
  /// to the given function. For example, these might be needed for PLT
  /// sequences of long-branch veneers.
  virtual ArrayRef<MCPhysReg>
  getIntraCallClobberedRegs(const MachineFunction *MF) const {
    return {};
  }

  /// Return true if all bits that are set in mask \p mask0 are also set in
  /// \p mask1.
  bool regmaskSubsetEqual(const uint32_t *mask0, const uint32_t *mask1) const;

  /// Return all the call-preserved register masks defined for this target.
  virtual ArrayRef<const uint32_t *> getRegMasks() const = 0;
  virtual ArrayRef<const char *> getRegMaskNames() const = 0;

  /// Returns a bitset indexed by physical register number indicating if a
  /// register is a special register that has particular uses and should be
  /// considered unavailable at all times, e.g. stack pointer, return address.
  /// A reserved register:
  /// - is not allocatable
  /// - is considered always live
  /// - is ignored by liveness tracking
  /// It is often necessary to reserve the super registers of a reserved
  /// register as well, to avoid them getting allocated indirectly. You may use
  /// markSuperRegs() and checkAllSuperRegsMarked() in this case.
  virtual BitVector getReservedRegs(const MachineFunction &MF) const = 0;
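
  // Sketch of a typical target override (hypothetical target "Foo"; the
  // register name is a placeholder): reserve the stack pointer together with
  // its super-registers, then verify the invariant.
  //
  //   BitVector FooRegisterInfo::getReservedRegs(const MachineFunction &MF) const {
  //     BitVector Reserved(getNumRegs());
  //     markSuperRegs(Reserved, Foo::SP); // reserves SP and its super-regs
  //     assert(checkAllSuperRegsMarked(Reserved));
  //     return Reserved;
  //   }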

  /// Returns false if we can't guarantee that Physreg, specified as an IR asm
  /// clobber constraint, will be preserved across the statement.
  virtual bool isAsmClobberable(const MachineFunction &MF,
                                MCRegister PhysReg) const {
    return true;
  }

  /// Returns true if PhysReg cannot be written to in inline asm statements.
  virtual bool isInlineAsmReadOnlyReg(const MachineFunction &MF,
                                      unsigned PhysReg) const {
    return false;
  }

  /// Returns true if PhysReg is unallocatable and constant throughout the
  /// function.  Used by MachineRegisterInfo::isConstantPhysReg().
  virtual bool isConstantPhysReg(MCRegister PhysReg) const { return false; }

  /// Returns true if the register class is considered divergent.
  virtual bool isDivergentRegClass(const TargetRegisterClass *RC) const {
    return false;
  }

  /// Physical registers that may be modified within a function but are
  /// guaranteed to be restored before any uses. This is useful for targets that
  /// have call sequences where a GOT register may be updated by the caller
  /// prior to a call and is guaranteed to be restored (also by the caller)
  /// after the call.
  virtual bool isCallerPreservedPhysReg(MCRegister PhysReg,
                                        const MachineFunction &MF) const {
    return false;
  }

  /// This is a wrapper around getCallPreservedMask().
  /// Return true if the register is preserved after the call.
  virtual bool isCalleeSavedPhysReg(MCRegister PhysReg,
                                    const MachineFunction &MF) const;

  /// Returns true if PhysReg can be used as an argument to a function.
  virtual bool isArgumentRegister(const MachineFunction &MF,
                                  MCRegister PhysReg) const {
    return false;
  }

  /// Returns true if PhysReg is a fixed register.
  virtual bool isFixedRegister(const MachineFunction &MF,
                               MCRegister PhysReg) const {
    return false;
  }

  /// Returns true if PhysReg is a general purpose register.
  virtual bool isGeneralPurposeRegister(const MachineFunction &MF,
                                        MCRegister PhysReg) const {
    return false;
  }

  /// Prior to adding the live-out mask to a stackmap or patchpoint
  /// instruction, provide the target the opportunity to adjust it (mainly to
  /// remove pseudo-registers that should be ignored).
  virtual void adjustStackMapLiveOutMask(uint32_t *Mask) const {}

  /// Return a super-register of the specified register
  /// Reg so its sub-register of index SubIdx is Reg.
  MCRegister getMatchingSuperReg(MCRegister Reg, unsigned SubIdx,
                                 const TargetRegisterClass *RC) const {
    return MCRegisterInfo::getMatchingSuperReg(Reg, SubIdx, RC->MC);
  }

  /// Return a subclass of the specified register
  /// class A so that each register in it has a sub-register of the
  /// specified sub-register index which is in the specified register class B.
  ///
  /// TableGen will synthesize missing A sub-classes.
  virtual const TargetRegisterClass *
  getMatchingSuperRegClass(const TargetRegisterClass *A,
                           const TargetRegisterClass *B, unsigned Idx) const;

  // For a copy-like instruction that defines a register of class DefRC with
  // subreg index DefSubReg, reading from another source with class SrcRC and
  // subregister SrcSubReg, return true if this is a preferable copy
  // instruction or an earlier use should be used.
  virtual bool shouldRewriteCopySrc(const TargetRegisterClass *DefRC,
                                    unsigned DefSubReg,
                                    const TargetRegisterClass *SrcRC,
                                    unsigned SrcSubReg) const;

  /// Returns the largest legal sub-class of RC that
  /// supports the sub-register index Idx.
  /// If no such sub-class exists, return NULL.
  /// If all registers in RC already have an Idx sub-register, return RC.
  ///
  /// TableGen generates a version of this function that is good enough in most
  /// cases.  Targets can override if they have constraints that TableGen
  /// doesn't understand.  For example, the x86 sub_8bit sub-register index is
  /// supported by the full GR32 register class in 64-bit mode, but only by the
  /// GR32_ABCD register class in 32-bit mode.
  ///
  /// TableGen will synthesize missing RC sub-classes.
  virtual const TargetRegisterClass *
  getSubClassWithSubReg(const TargetRegisterClass *RC, unsigned Idx) const {
    assert(Idx == 0 && "Target has no sub-registers");
    return RC;
  }

  /// Return the subregister index you get from composing
  /// two subregister indices.
  ///
  /// The special null sub-register index composes as the identity.
  ///
  /// If R:a:b is the same register as R:c, then composeSubRegIndices(a, b)
  /// returns c. Note that composeSubRegIndices does not tell you about illegal
  /// compositions. If R does not have a subreg a, or R:a does not have a subreg
  /// b, composeSubRegIndices doesn't tell you.
  ///
  /// The ARM register Q0 has two D subregs dsub_0:D0 and dsub_1:D1. It also has
  /// ssub_0:S0 - ssub_3:S3 subregs.
  /// If you compose subreg indices dsub_1, ssub_0 you get ssub_2.
  unsigned composeSubRegIndices(unsigned a, unsigned b) const {
    if (!a) return b;
    if (!b) return a;
    return composeSubRegIndicesImpl(a, b);
  }
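
  // Following the ARM example above (the index names are assumed to come from
  // the ARM target): composing dsub_1 with ssub_0 yields ssub_2, and the null
  // index composes as the identity.
  //
  //   unsigned Idx = TRI->composeSubRegIndices(ARM::dsub_1, ARM::ssub_0);
  //   // Idx == ARM::ssub_2
  //   assert(TRI->composeSubRegIndices(0, ARM::ssub_0) == ARM::ssub_0);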

  /// Transforms a LaneMask computed for one subregister to the lanemask that
  /// would have been computed when composing the subsubregisters with IdxA
  /// first. @sa composeSubRegIndices()
  LaneBitmask composeSubRegIndexLaneMask(unsigned IdxA,
                                         LaneBitmask Mask) const {
    if (!IdxA)
      return Mask;
    return composeSubRegIndexLaneMaskImpl(IdxA, Mask);
  }

  /// Transform a lanemask given for a virtual register to the corresponding
  /// lanemask before using subregister with index \p IdxA.
  /// This is the reverse of composeSubRegIndexLaneMask(); assuming Mask is a
  /// valid lane mask (no invalid bits set), the following holds:
  /// X0 = composeSubRegIndexLaneMask(Idx, Mask)
  /// X1 = reverseComposeSubRegIndexLaneMask(Idx, X0)
  /// => X1 == Mask
  LaneBitmask reverseComposeSubRegIndexLaneMask(unsigned IdxA,
                                                LaneBitmask LaneMask) const {
    if (!IdxA)
      return LaneMask;
    return reverseComposeSubRegIndexLaneMaskImpl(IdxA, LaneMask);
  }
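
  // The round-trip property from the comment above, written out as an
  // illustrative check:
  //
  //   LaneBitmask X0 = TRI->composeSubRegIndexLaneMask(Idx, Mask);
  //   LaneBitmask X1 = TRI->reverseComposeSubRegIndexLaneMask(Idx, X0);
  //   assert(X1 == Mask && "reverse composition undoes composition");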

  /// Debugging helper: dump register in human readable form to dbgs() stream.
  static void dumpReg(Register Reg, unsigned SubRegIndex = 0,
                      const TargetRegisterInfo *TRI = nullptr);

protected:
  /// Overridden by TableGen in targets that have sub-registers.
  virtual unsigned composeSubRegIndicesImpl(unsigned, unsigned) const {
    llvm_unreachable("Target has no sub-registers");
  }

  /// Overridden by TableGen in targets that have sub-registers.
  virtual LaneBitmask
  composeSubRegIndexLaneMaskImpl(unsigned, LaneBitmask) const {
    llvm_unreachable("Target has no sub-registers");
  }

  virtual LaneBitmask reverseComposeSubRegIndexLaneMaskImpl(unsigned,
                                                            LaneBitmask) const {
    llvm_unreachable("Target has no sub-registers");
  }

  /// Return the register cost table index. This implementation is sufficient
  /// for most architectures and can be overridden by targets in case there are
  /// multiple cost values associated with each register.
  virtual unsigned getRegisterCostTableIndex(const MachineFunction &MF) const {
    return 0;
  }

public:
  /// Find a common super-register class if it exists.
  ///
  /// Find a register class, SuperRC and two sub-register indices, PreA and
  /// PreB, such that:
  ///
  ///   1. PreA + SubA == PreB + SubB  (using composeSubRegIndices()), and
  ///
  ///   2. For all Reg in SuperRC: Reg:PreA in RCA and Reg:PreB in RCB, and
  ///
  ///   3. SuperRC->getSize() >= max(RCA->getSize(), RCB->getSize()).
  ///
  /// SuperRC will be chosen such that no super-class of SuperRC satisfies the
  /// requirements, and there is no register class with a smaller spill size
  /// that satisfies the requirements.
  ///
  /// SubA and SubB must not be 0. Use getMatchingSuperRegClass() instead.
  ///
  /// Either of the PreA and PreB sub-register indices may be returned as 0. In
  /// that case, the returned register class will be a sub-class of the
  /// corresponding argument register class.
  ///
  /// The function returns NULL if no register class can be found.
  const TargetRegisterClass*
  getCommonSuperRegClass(const TargetRegisterClass *RCA, unsigned SubA,
                         const TargetRegisterClass *RCB, unsigned SubB,
                         unsigned &PreA, unsigned &PreB) const;

  //===--------------------------------------------------------------------===//
  // Register Class Information
  //
protected:
  const RegClassInfo &getRegClassInfo(const TargetRegisterClass &RC) const {
    return RCInfos[getNumRegClasses() * HwMode + RC.getID()];
  }

public:
  /// Register class iterators
  regclass_iterator regclass_begin() const { return RegClassBegin; }
  regclass_iterator regclass_end() const { return RegClassEnd; }
  iterator_range<regclass_iterator> regclasses() const {
    return make_range(regclass_begin(), regclass_end());
  }

  unsigned getNumRegClasses() const {
    return (unsigned)(regclass_end()-regclass_begin());
  }

  /// Returns the register class associated with the enumeration value.
  /// See class MCOperandInfo.
  const TargetRegisterClass *getRegClass(unsigned i) const {
    assert(i < getNumRegClasses() && "Register Class ID out of range");
    return RegClassBegin[i];
  }

  /// Returns the name of the register class.
  const char *getRegClassName(const TargetRegisterClass *Class) const {
    return MCRegisterInfo::getRegClassName(Class->MC);
  }

  /// Find the largest common subclass of A and B.
  /// Return NULL if there is no common subclass.
  const TargetRegisterClass *
  getCommonSubClass(const TargetRegisterClass *A,
                    const TargetRegisterClass *B) const;

  /// Returns a TargetRegisterClass used for pointer values.
  /// If a target supports multiple different pointer register classes,
  /// kind specifies which one is indicated.
  virtual const TargetRegisterClass *
  getPointerRegClass(const MachineFunction &MF, unsigned Kind=0) const {
    llvm_unreachable("Target didn't implement getPointerRegClass!");
  }

  /// Returns a legal register class to copy a register in the specified class
  /// to or from. If it is possible to copy the register directly without using
  /// a cross register class copy, return the specified RC. Returns NULL if it
  /// is not possible to copy between two registers of the specified class.
  virtual const TargetRegisterClass *
  getCrossCopyRegClass(const TargetRegisterClass *RC) const {
    return RC;
  }

  /// Returns the largest super class of RC that is legal to use in the current
  /// sub-target and has the same spill size.
  /// The returned register class can be used to create virtual registers which
  /// means that all its registers can be copied and spilled.
  virtual const TargetRegisterClass *
  getLargestLegalSuperClass(const TargetRegisterClass *RC,
                            const MachineFunction &) const {
    /// The default implementation is very conservative and doesn't allow the
    /// register allocator to inflate register classes.
    return RC;
  }

  /// Return the register pressure "high water mark" for the specific register
  /// class. The scheduler is in high register pressure mode (for the specific
  /// register class) if it goes over the limit.
  ///
  /// Note: this is the old register pressure model that relies on a manually
  /// specified representative register class per value type.
  virtual unsigned getRegPressureLimit(const TargetRegisterClass *RC,
                                       MachineFunction &MF) const {
    return 0;
  }

  /// Return a heuristic for the machine scheduler to compare the profitability
  /// of increasing one register pressure set versus another.  The scheduler
  /// will prefer increasing the register pressure of the set which returns
  /// the largest value for this function.
  virtual unsigned getRegPressureSetScore(const MachineFunction &MF,
                                          unsigned PSetID) const {
    return PSetID;
  }

  /// Get the weight in units of pressure for this register class.
  virtual const RegClassWeight &getRegClassWeight(
    const TargetRegisterClass *RC) const = 0;

  /// Returns size in bits of a phys/virtual/generic register.
  unsigned getRegSizeInBits(Register Reg, const MachineRegisterInfo &MRI) const;

  /// Get the weight in units of pressure for this register unit.
  virtual unsigned getRegUnitWeight(unsigned RegUnit) const = 0;

  /// Get the number of dimensions of register pressure.
  virtual unsigned getNumRegPressureSets() const = 0;

  /// Get the name of this register unit pressure set.
  virtual const char *getRegPressureSetName(unsigned Idx) const = 0;

  /// Get the register unit pressure limit for this dimension.
  /// This limit must be adjusted dynamically for reserved registers.
  virtual unsigned getRegPressureSetLimit(const MachineFunction &MF,
                                          unsigned Idx) const = 0;

  /// Get the dimensions of register pressure impacted by this register class.
  /// Returns a -1 terminated array of pressure set IDs.
  virtual const int *getRegClassPressureSets(
    const TargetRegisterClass *RC) const = 0;

  /// Get the dimensions of register pressure impacted by this register unit.
  /// Returns a -1 terminated array of pressure set IDs.
  virtual const int *getRegUnitPressureSets(unsigned RegUnit) const = 0;

  /// Get a list of 'hint' registers that the register allocator should try
  /// first when allocating a physical register for the virtual register
  /// VirtReg. These registers are effectively moved to the front of the
  /// allocation order. If true is returned, regalloc will try to only use
  /// hints to the greatest extent possible even if it means spilling.
  ///
  /// The Order argument is the allocation order for VirtReg's register class
  /// as returned from RegisterClassInfo::getOrder(). The hint registers must
  /// come from Order, and they must not be reserved.
  ///
  /// The default implementation of this function will only add target
  /// independent register allocation hints. Targets that override this
  /// function should typically call this default implementation as well and
  /// expect to see generic copy hints added.
  virtual bool
  getRegAllocationHints(Register VirtReg, ArrayRef<MCPhysReg> Order,
                        SmallVectorImpl<MCPhysReg> &Hints,
                        const MachineFunction &MF,
                        const VirtRegMap *VRM = nullptr,
                        const LiveRegMatrix *Matrix = nullptr) const;
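
  // Sketch of a typical target override (hypothetical target "Foo"): push any
  // target-specific hints first, then delegate to the default implementation
  // so the generic copy hints are still added, as recommended above.
  //
  //   bool FooRegisterInfo::getRegAllocationHints(
  //       Register VirtReg, ArrayRef<MCPhysReg> Order,
  //       SmallVectorImpl<MCPhysReg> &Hints, const MachineFunction &MF,
  //       const VirtRegMap *VRM, const LiveRegMatrix *Matrix) const {
  //     // ... append target-specific hints to Hints here ...
  //     return TargetRegisterInfo::getRegAllocationHints(VirtReg, Order, Hints,
  //                                                      MF, VRM, Matrix);
  //   }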

  /// A callback to allow targets a chance to update register allocation hints
  /// when a register is "changed" (e.g. coalesced) to another register.
  /// e.g. On ARM, some virtual registers should target register pairs;
  /// if one of the pair is coalesced to another register, the allocation hint
  /// of the other half of the pair should be changed to point to the new
  /// register.
  virtual void updateRegAllocHint(Register Reg, Register NewReg,
                                  MachineFunction &MF) const {
    // Do nothing.
  }

  /// Allow the target to reverse allocation order of local live ranges. This
  /// will generally allocate shorter local live ranges first. For targets with
  /// many registers, this could reduce regalloc compile time by a large
  /// factor. It is disabled by default for three reasons:
  /// (1) Top-down allocation is simpler and easier to debug for targets that
  /// don't benefit from reversing the order.
  /// (2) Bottom-up allocation could result in poor eviction decisions on some
  /// targets affecting the performance of compiled code.
  /// (3) Bottom-up allocation is no longer guaranteed to optimally color.
  virtual bool reverseLocalAssignment() const { return false; }

  /// Allow the target to override the cost of using a callee-saved register for
  /// the first time. Default value of 0 means we will use a callee-saved
  /// register if it is available.
  virtual unsigned getCSRFirstUseCost() const { return 0; }

  /// Returns true if the target requires (and can make use of) the register
  /// scavenger.
  virtual bool requiresRegisterScavenging(const MachineFunction &MF) const {
    return false;
  }

  /// Returns true if the target wants to use frame pointer based accesses to
  /// spill to the scavenger emergency spill slot.
  virtual bool useFPForScavengingIndex(const MachineFunction &MF) const {
    return true;
  }

  /// Returns true if the target requires post PEI scavenging of registers for
  /// materializing frame index constants.
  virtual bool requiresFrameIndexScavenging(const MachineFunction &MF) const {
    return false;
  }

  /// Returns true if the target requires using the RegScavenger directly for
  /// frame elimination despite using requiresFrameIndexScavenging.
  virtual bool requiresFrameIndexReplacementScavenging(
      const MachineFunction &MF) const {
    return false;
  }

  /// Returns true if the target wants the LocalStackAllocation pass to be run
  /// and virtual base registers used for more efficient stack access.
  virtual bool requiresVirtualBaseRegisters(const MachineFunction &MF) const {
    return false;
  }

  /// Return true if target has reserved a spill slot in the stack frame of
  /// the given function for the specified register. e.g. On x86, if the frame
  /// register is required, the first fixed stack object is reserved as its
  /// spill slot. This tells PEI not to create a new stack frame
  /// object for the given register. It should be called only after
  /// determineCalleeSaves().
  virtual bool hasReservedSpillSlot(const MachineFunction &MF, Register Reg,
                                    int &FrameIdx) const {
    return false;
  }

  /// Returns true if the live-ins should be tracked after register allocation.
  virtual bool trackLivenessAfterRegAlloc(const MachineFunction &MF) const {
    return true;
  }

  /// True if the stack can be realigned for the target.
  virtual bool canRealignStack(const MachineFunction &MF) const;

  /// True if storage within the function requires the stack pointer to be
  /// aligned more than the normal calling convention calls for.
  virtual bool shouldRealignStack(const MachineFunction &MF) const;

  /// True if stack realignment is required and still possible.
  bool hasStackRealignment(const MachineFunction &MF) const {
    return shouldRealignStack(MF) && canRealignStack(MF);
  }

  /// Get the offset from the referenced frame index in the instruction,
  /// if there is one.
  virtual int64_t getFrameIndexInstrOffset(const MachineInstr *MI,
                                           int Idx) const {
    return 0;
  }

  /// Returns true if the instruction's frame index reference would be better
  /// served by a base register other than FP or SP.
  /// Used by LocalStackFrameAllocation to determine which frame index
  /// references it should create new base registers for.
  virtual bool needsFrameBaseReg(MachineInstr *MI, int64_t Offset) const {
    return false;
  }

  /// Insert defining instruction(s) for a pointer to FrameIdx before
  /// insertion point I. Return materialized frame pointer.
  virtual Register materializeFrameBaseRegister(MachineBasicBlock *MBB,
                                                int FrameIdx,
                                                int64_t Offset) const {
    llvm_unreachable("materializeFrameBaseRegister does not exist on this "
                     "target");
  }

  /// Resolve a frame index operand of an instruction
  /// to reference the indicated base register plus offset instead.
  virtual void resolveFrameIndex(MachineInstr &MI, Register BaseReg,
                                 int64_t Offset) const {
    llvm_unreachable("resolveFrameIndex does not exist on this target");
  }

  /// Determine whether a given base register plus offset immediate is
  /// encodable to resolve a frame index.
  virtual bool isFrameOffsetLegal(const MachineInstr *MI, Register BaseReg,
                                  int64_t Offset) const {
    llvm_unreachable("isFrameOffsetLegal does not exist on this target");
  }

  /// Gets the DWARF expression opcodes for \p Offset.
  virtual void getOffsetOpcodes(const StackOffset &Offset,
                                SmallVectorImpl<uint64_t> &Ops) const;

  /// Prepends a DWARF expression for \p Offset to DIExpression \p Expr.
  DIExpression *
  prependOffsetExpression(const DIExpression *Expr, unsigned PrependFlags,
                          const StackOffset &Offset) const;

  /// Spill the register so it can be used by the register scavenger.
  /// Return true if the register was spilled, false otherwise.
  /// If this function does not spill the register, the scavenger
  /// will instead spill it to the emergency spill slot.
  virtual bool saveScavengerRegister(MachineBasicBlock &MBB,
                                     MachineBasicBlock::iterator I,
                                     MachineBasicBlock::iterator &UseMI,
                                     const TargetRegisterClass *RC,
                                     Register Reg) const {
    return false;
  }

  /// This method must be overridden to eliminate abstract frame indices from
  /// instructions which may use them. The instruction referenced by the
  /// iterator contains an MO_FrameIndex operand which must be eliminated by
  /// this method. This method may modify or replace the specified instruction,
  /// as long as it keeps the iterator pointing at the finished product.
  /// SPAdj is the SP adjustment due to call frame setup instruction.
  /// FIOperandNum is the FI operand number.
  virtual void eliminateFrameIndex(MachineBasicBlock::iterator MI,
                                   int SPAdj, unsigned FIOperandNum,
                                   RegScavenger *RS = nullptr) const = 0;

  /// Return the assembly name for \p Reg.
  virtual StringRef getRegAsmName(MCRegister Reg) const {
    // FIXME: We are assuming that the assembly name is equal to the TableGen
    // name converted to lower case
    //
    // The TableGen name is the name of the definition for this register in the
    // target's tablegen files.  For example, the TableGen name of
    // def EAX : Register <...>; is "EAX"
    return StringRef(getName(Reg));
  }

  //===--------------------------------------------------------------------===//
  /// Subtarget Hooks

  /// SrcRC and DstRC will be morphed into NewRC if this returns true.
  virtual bool shouldCoalesce(MachineInstr *MI,
                              const TargetRegisterClass *SrcRC,
                              unsigned SubReg,
                              const TargetRegisterClass *DstRC,
                              unsigned DstSubReg,
                              const TargetRegisterClass *NewRC,
                              LiveIntervals &LIS) const
  { return true; }

  /// Region split has a high compile time cost especially for large live
  /// ranges. This method is used to decide whether or not \p VirtReg should
  /// go through this expensive splitting heuristic.
  virtual bool shouldRegionSplitForVirtReg(const MachineFunction &MF,
                                           const LiveInterval &VirtReg) const;

  /// Last chance recoloring has a high compile time cost especially for
  /// targets with a lot of registers.
  /// This method is used to decide whether or not \p VirtReg should
  /// go through this expensive heuristic.
  /// When this target hook is hit, by returning false, there is a high
  /// chance that the register allocation will fail altogether (usually with
  /// "ran out of registers").
  /// That said, this error usually points to another problem in the
  /// optimization pipeline.
  virtual bool
  shouldUseLastChanceRecoloringForVirtReg(const MachineFunction &MF,
                                          const LiveInterval &VirtReg) const {
    return true;
  }

  /// Deferred spilling delays the spill insertion of a virtual register
  /// after every other allocation. By deferring the spilling, it is
  /// sometimes possible to eliminate that spilling altogether because
  /// something else could have been eliminated, thus leaving some space
  /// for the virtual register.
  /// However, this comes with a compile time impact because it adds one
  /// more stage to the greedy register allocator.
  /// This method is used to decide whether \p VirtReg should use the deferred
  /// spilling stage instead of being spilled right away.
  virtual bool
  shouldUseDeferredSpillingForVirtReg(const MachineFunction &MF,
                                      const LiveInterval &VirtReg) const {
    return false;
  }

  /// When prioritizing live ranges in register allocation, if this hook returns
  /// true then the AllocationPriority of the register class will be treated as
  /// more important than whether the range is local to a basic block or global.
  virtual bool
  regClassPriorityTrumpsGlobalness(const MachineFunction &MF) const {
    return false;
  }

  //===--------------------------------------------------------------------===//
  /// Debug information queries.

  /// getFrameRegister - This method should return the register used as a base
  /// for values allocated in the current stack frame.
  virtual Register getFrameRegister(const MachineFunction &MF) const = 0;

  /// Mark a register and all its aliases as reserved in the given set.
  void markSuperRegs(BitVector &RegisterSet, MCRegister Reg) const;

  /// Returns true if for every register in the set all super registers are part
  /// of the set as well.
  bool checkAllSuperRegsMarked(const BitVector &RegisterSet,
      ArrayRef<MCPhysReg> Exceptions = ArrayRef<MCPhysReg>()) const;

  virtual const TargetRegisterClass *
  getConstrainedRegClassForOperand(const MachineOperand &MO,
                                   const MachineRegisterInfo &MRI) const {
    return nullptr;
  }

  /// Returns the physical register number of sub-register Idx
  /// for physical register Reg. Return zero if the sub-register does not
  /// exist.
  inline MCRegister getSubReg(MCRegister Reg, unsigned Idx) const {
    return static_cast<const MCRegisterInfo *>(this)->getSubReg(Reg, Idx);
  }

  /// Some targets have non-allocatable registers that aren't technically part
  /// of the explicit callee saved register list, but should be handled as such
  /// in certain cases.
  virtual bool isNonallocatableRegisterCalleeSave(MCRegister Reg) const {
    return false;
  }
};

//===----------------------------------------------------------------------===//
//                           SuperRegClassIterator
//===----------------------------------------------------------------------===//
//
// Iterate over the possible super-registers for a given register class. The
// iterator will visit a list of pairs (Idx, Mask) corresponding to the
// possible classes of super-registers.
//
// Each bit mask will have at least one set bit, and each set bit in Mask
// corresponds to a SuperRC such that:
//
//   For all Reg in SuperRC: Reg:Idx is in RC.
//
// The iterator can include (0, RC->getSubClassMask()) as the first entry which
// also satisfies the above requirement, assuming Reg:0 == Reg.
//
class SuperRegClassIterator {
  const unsigned RCMaskWords;
  unsigned SubReg = 0;
  const uint16_t *Idx;
  const uint32_t *Mask;

public:
  /// Create a SuperRegClassIterator that visits all the super-register classes
  /// of RC. When IncludeSelf is set, also include the (0, sub-classes) entry.
  SuperRegClassIterator(const TargetRegisterClass *RC,
                        const TargetRegisterInfo *TRI,
                        bool IncludeSelf = false)
    : RCMaskWords((TRI->getNumRegClasses() + 31) / 32),
      Idx(RC->getSuperRegIndices()), Mask(RC->getSubClassMask()) {
    if (!IncludeSelf)
      ++*this;
  }

  /// Returns true if this iterator is still pointing at a valid entry.
  bool isValid() const { return Idx; }

  /// Returns the current sub-register index.
  unsigned getSubReg() const { return SubReg; }

  /// Returns the bit mask of register classes that getSubReg() projects into
  /// RC.
  /// See TargetRegisterClass::getSubClassMask() for how to use it.
  const uint32_t *getMask() const { return Mask; }

  /// Advance iterator to the next entry.
  void operator++() {
    assert(isValid() && "Cannot move iterator past end.");
    Mask += RCMaskWords;
    SubReg = *Idx++;
    if (!SubReg)
      Idx = nullptr;
  }
};
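
// A minimal usage sketch (illustrative): walk the (sub-register index, class
// mask) entries for a register class RC, including the self entry.
//
//   for (SuperRegClassIterator SRI(RC, TRI, /*IncludeSelf=*/true);
//        SRI.isValid(); ++SRI) {
//     unsigned SubIdx = SRI.getSubReg();
//     const uint32_t *Mask = SRI.getMask();
//     // e.g. feed Mask to a BitMaskClassIterator (see below).
//   }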

//===----------------------------------------------------------------------===//
//                           BitMaskClassIterator
//===----------------------------------------------------------------------===//
/// This class encapsulates the logic to iterate over the bitmasks returned by
/// the various RegClass related APIs.
/// E.g., this class can be used to iterate over the subclasses provided by
/// TargetRegisterClass::getSubClassMask or SuperRegClassIterator::getMask.
class BitMaskClassIterator {
  /// Total number of register classes.
  const unsigned NumRegClasses;
  /// Base index of CurrentChunk.
  /// In other words, the number of bits we read to get to the
  /// beginning of that chunk.
  unsigned Base = 0;
  /// Adjusted base index of CurrentChunk.
  /// Base index + how many bits we read within CurrentChunk.
  unsigned Idx = 0;
  /// Current register class ID.
  unsigned ID = 0;
  /// Mask we are iterating over.
  const uint32_t *Mask;
  /// Current chunk of the Mask we are traversing.
  uint32_t CurrentChunk;

  /// Move ID to the next set bit.
  void moveToNextID() {
    // If the current chunk of memory is empty, move to the next one,
    // while making sure we do not go past the number of register
    // classes.
    while (!CurrentChunk) {
      // Move to the next chunk.
      Base += 32;
      if (Base >= NumRegClasses) {
        ID = NumRegClasses;
        return;
      }
      CurrentChunk = *++Mask;
      Idx = Base;
    }
    // Otherwise look for the first bit set from the right
    // (representation of the class ID is big endian).
    // See getSubClassMask for more details on the representation.
    unsigned Offset = countTrailingZeros(CurrentChunk);
    // Add the Offset to the adjusted base number of this chunk: Idx.
    // This is the ID of the register class.
    ID = Idx + Offset;

    // Consume the zeros, if any, and the bit we just read
    // so that we are at the right spot for the next call.
    // Do not do Offset + 1, because Offset may be 31 and shifting by 32
    // would be UB. In that case we could have made the chunk equal to 0,
    // but that would have introduced an if statement.
    moveNBits(Offset);
    moveNBits(1);
  }

  /// Move \p NumBits Bits forward in CurrentChunk.
  void moveNBits(unsigned NumBits) {
    assert(NumBits < 32 && "Undefined behavior spotted!");
    // Consume the bit we read for the next call.
    CurrentChunk >>= NumBits;
    // Adjust the base for the chunk.
    Idx += NumBits;
  }

public:
  /// Create a BitMaskClassIterator that visits all the register classes
  /// represented by \p Mask.
  ///
  /// \pre \p Mask != nullptr
  BitMaskClassIterator(const uint32_t *Mask, const TargetRegisterInfo &TRI)
      : NumRegClasses(TRI.getNumRegClasses()), Mask(Mask), CurrentChunk(*Mask) {
    // Move to the first ID.
    moveToNextID();
  }

  /// Returns true if this iterator is still pointing at a valid entry.
  bool isValid() const { return getID() != NumRegClasses; }

  /// Returns the current register class ID.
  unsigned getID() const { return ID; }

  /// Advance iterator to the next entry.
  void operator++() {
    assert(isValid() && "Cannot move iterator past end.");
    moveToNextID();
  }
};
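
// A minimal usage sketch (illustrative): enumerate the register class IDs
// encoded in a sub-class mask, e.g. one returned by getSubClassMask() or
// SuperRegClassIterator::getMask().
//
//   for (BitMaskClassIterator It(RC->getSubClassMask(), *TRI); It.isValid();
//        ++It) {
//     const TargetRegisterClass *SubRC = TRI->getRegClass(It.getID());
//     // ...
//   }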

// This is useful when building IndexedMaps keyed on virtual registers
struct VirtReg2IndexFunctor {
  using argument_type = Register;
  unsigned operator()(Register Reg) const {
    return Register::virtReg2Index(Reg);
  }
};

/// Prints virtual and physical registers with or without a TRI instance.
///
/// The format is:
///   %noreg          - NoRegister
///   %5              - a virtual register.
///   %5:sub_8bit     - a virtual register with sub-register index (with TRI).
///   %eax            - a physical register
///   %physreg17      - a physical register when no TRI instance given.
///
/// Usage: OS << printReg(Reg, TRI, SubRegIdx) << '\n';
Printable printReg(Register Reg, const TargetRegisterInfo *TRI = nullptr,
                   unsigned SubIdx = 0,
                   const MachineRegisterInfo *MRI = nullptr);

/// Create Printable object to print register units on a \ref raw_ostream.
///
/// Register units are named after their root registers:
///
///   al      - Single root.
///   fp0~st7 - Dual roots.
///
/// Usage: OS << printRegUnit(Unit, TRI) << '\n';
Printable printRegUnit(unsigned Unit, const TargetRegisterInfo *TRI);

/// Create Printable object to print virtual registers and physical
/// registers on a \ref raw_ostream.
Printable printVRegOrUnit(unsigned VRegOrUnit, const TargetRegisterInfo *TRI);

/// Create Printable object to print register classes or register banks
/// on a \ref raw_ostream.
Printable printRegClassOrBank(Register Reg, const MachineRegisterInfo &RegInfo,
                              const TargetRegisterInfo *TRI);

} // end namespace llvm

#endif // LLVM_CODEGEN_TARGETREGISTERINFO_H