//===-- llvm/CodeGen/TargetSchedule.h - Sched Machine Model -----*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines a wrapper around MCSchedModel that allows the interface to
// benefit from information currently only available in TargetInstrInfo.
// Ideally, the scheduling interface would be fully defined in the MC layer.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_CODEGEN_TARGETSCHEDULE_H
#define LLVM_CODEGEN_TARGETSCHEDULE_H

#include "llvm/ADT/SmallVector.h"
#include "llvm/MC/MCInstrItineraries.h"
#include "llvm/MC/MCSchedule.h"
#include "llvm/Target/TargetSubtargetInfo.h"

namespace llvm {

class TargetRegisterInfo;
class TargetSubtargetInfo;
class TargetInstrInfo;
class MachineInstr;

/// Provide an instruction scheduling machine model to CodeGen passes.
class TargetSchedModel {
  // For efficiency, hold a copy of the statically defined MCSchedModel for
  // this processor.
  MCSchedModel SchedModel;
  InstrItineraryData InstrItins;
  const TargetSubtargetInfo *STI;
  const TargetInstrInfo *TII;

  SmallVector<unsigned, 16> ResourceFactors;
  unsigned MicroOpFactor; // Multiply to normalize microops to resource units.
  unsigned ResourceLCM;   // Resource units per cycle. Latency normalization factor.
public:
  TargetSchedModel()
      : SchedModel(MCSchedModel::GetDefaultSchedModel()), STI(nullptr),
        TII(nullptr) {}

  /// \brief Initialize the machine model for instruction scheduling.
  ///
  /// The machine model API keeps a copy of the top-level MCSchedModel table
  /// indices and may query TargetSubtargetInfo and TargetInstrInfo to resolve
  /// dynamic properties.
  void init(const MCSchedModel &sm, const TargetSubtargetInfo *sti,
            const TargetInstrInfo *tii);
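  // Illustrative initialization from a CodeGen pass. This is a sketch, not
  // part of the interface; it assumes the caller can reach the current
  // TargetSubtargetInfo (e.g. via its MachineFunction MF) and that the
  // subtarget exposes its MCSchedModel:
  //   const TargetSubtargetInfo &ST = MF.getSubtarget();
  //   SchedModel.init(ST.getSchedModel(), &ST, ST.getInstrInfo());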

  /// Return the MCSchedClassDesc for this instruction.
  const MCSchedClassDesc *resolveSchedClass(const MachineInstr *MI) const;

  /// \brief TargetInstrInfo getter.
  const TargetInstrInfo *getInstrInfo() const { return TII; }

  /// \brief Return true if this machine model includes an instruction-level
  /// scheduling model.
  ///
  /// This is more detailed than the coarse-grained IssueWidth and default
  /// latency properties, but separate from the per-cycle itinerary data.
  bool hasInstrSchedModel() const;

  const MCSchedModel *getMCSchedModel() const { return &SchedModel; }

  /// \brief Return true if this machine model includes cycle-to-cycle itinerary
  /// data.
  ///
  /// This models scheduling at each stage in the processor pipeline.
  bool hasInstrItineraries() const;

  const InstrItineraryData *getInstrItineraries() const {
    if (hasInstrItineraries())
      return &InstrItins;
    return nullptr;
  }

  /// \brief Identify the processor corresponding to the current subtarget.
  unsigned getProcessorID() const { return SchedModel.getProcessorID(); }

  /// \brief Maximum number of micro-ops that may be scheduled per cycle.
  unsigned getIssueWidth() const { return SchedModel.IssueWidth; }

  /// \brief Return the number of issue slots required for this MI.
  unsigned getNumMicroOps(const MachineInstr *MI,
                          const MCSchedClassDesc *SC = nullptr) const;
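  // Illustrative use of the two queries above: check whether a single
  // instruction already saturates an issue group. Only the two calls shown
  // are part of this interface; MI and the enclosing context are assumed:
  //   bool FillsIssueGroup =
  //       SchedModel.getNumMicroOps(MI) >= SchedModel.getIssueWidth();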

  /// \brief Get the number of kinds of resources for this target.
  unsigned getNumProcResourceKinds() const {
    return SchedModel.getNumProcResourceKinds();
  }

  /// \brief Get a processor resource by ID for convenience.
  const MCProcResourceDesc *getProcResource(unsigned PIdx) const {
    return SchedModel.getProcResource(PIdx);
  }

#ifndef NDEBUG
  const char *getResourceName(unsigned PIdx) const {
    if (!PIdx)
      return "MOps";
    return SchedModel.getProcResource(PIdx)->Name;
  }
#endif

  typedef const MCWriteProcResEntry *ProcResIter;

  /// \brief Get an iterator into the processor resources consumed by this
  /// scheduling class.
  ProcResIter getWriteProcResBegin(const MCSchedClassDesc *SC) const {
    // The subtarget holds a single resource table for all processors.
    return STI->getWriteProcResBegin(SC);
  }
  ProcResIter getWriteProcResEnd(const MCSchedClassDesc *SC) const {
    return STI->getWriteProcResEnd(SC);
  }
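  // Illustrative walk over the resources a scheduling class consumes. A
  // sketch only: it assumes MCWriteProcResEntry exposes ProcResourceIdx and
  // Cycles, and that MI and dbgs() are available in the caller:
  //   const MCSchedClassDesc *SC = SchedModel.resolveSchedClass(MI);
  //   for (ProcResIter PI = SchedModel.getWriteProcResBegin(SC),
  //                    PE = SchedModel.getWriteProcResEnd(SC); PI != PE; ++PI)
  //     dbgs() << SchedModel.getResourceName(PI->ProcResourceIdx) << " x "
  //            << PI->Cycles << " cycles\n";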

  /// \brief Multiply the number of units consumed for a resource by this factor
  /// to normalize it relative to other resources.
  unsigned getResourceFactor(unsigned ResIdx) const {
    return ResourceFactors[ResIdx];
  }

  /// \brief Multiply number of micro-ops by this factor to normalize it
  /// relative to other resources.
  unsigned getMicroOpFactor() const {
    return MicroOpFactor;
  }

  /// \brief Multiply cycle count by this factor to normalize it relative to
  /// other resources. This is the number of resource units per cycle.
  unsigned getLatencyFactor() const {
    return ResourceLCM;
  }
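  // The three factors above put micro-ops, resource units, and cycles on a
  // common scale so they can be compared directly. A sketch of how a client
  // might normalize each quantity (the variable names are assumptions):
  //   unsigned NormalizedUOps   = NumMicroOps * SchedModel.getMicroOpFactor();
  //   unsigned NormalizedUnits  = UnitsUsed * SchedModel.getResourceFactor(PIdx);
  //   unsigned NormalizedCycles = Cycles * SchedModel.getLatencyFactor();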

  /// \brief Number of micro-ops that may be buffered for OOO execution.
  unsigned getMicroOpBufferSize() const { return SchedModel.MicroOpBufferSize; }

  /// \brief Number of resource units that may be buffered for OOO execution.
  /// \return The buffer size in resource units or -1 for unlimited.
  int getResourceBufferSize(unsigned PIdx) const {
    return SchedModel.getProcResource(PIdx)->BufferSize;
  }

  /// \brief Compute operand latency based on the available machine model.
  ///
  /// Compute and return the latency of the given data-dependent def and use
  /// when the operand indices are already known. UseMI may be NULL for an
  /// unknown user.
  unsigned computeOperandLatency(const MachineInstr *DefMI, unsigned DefOperIdx,
                                 const MachineInstr *UseMI, unsigned UseOperIdx)
    const;
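  // Minimal usage sketch for the latency query above; DefMI, UseMI, and the
  // operand indices are assumed to come from the caller's dependence
  // information:
  //   unsigned Lat =
  //       SchedModel.computeOperandLatency(DefMI, DefOpIdx, UseMI, UseOpIdx);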

  /// \brief Compute the instruction latency based on the available machine
  /// model.
  ///
  /// Compute and return the expected latency of this instruction independent of
  /// a particular use. computeOperandLatency is the preferred API, but this is
  /// occasionally useful to help estimate instruction cost.
  ///
  /// If UseDefaultDefLatency is false and no new machine sched model is
  /// present, this method falls back to TII->getInstrLatency with an empty
  /// instruction itinerary (this is so we preserve the previous behavior of the
  /// if converter after moving it to TargetSchedModel).
  unsigned computeInstrLatency(const MachineInstr *MI,
                               bool UseDefaultDefLatency = true) const;
  unsigned computeInstrLatency(unsigned Opcode) const;

  /// \brief Output dependency latency of a pair of defs of the same register.
  ///
  /// This is typically one cycle.
  unsigned computeOutputLatency(const MachineInstr *DefMI, unsigned DefIdx,
                                const MachineInstr *DepMI) const;
};

} // namespace llvm

#endif