//===- llvm/Target/TargetSchedule.cpp - Sched Machine Model ---------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements a wrapper around MCSchedModel that allows the interface
// to benefit from information currently only available in TargetInstrInfo.
//
//===----------------------------------------------------------------------===//

#include "llvm/CodeGen/TargetSchedule.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/CodeGen/TargetInstrInfo.h"
#include "llvm/CodeGen/TargetSubtargetInfo.h"
#include "llvm/MC/MCInstrDesc.h"
#include "llvm/MC/MCInstrItineraries.h"
#include "llvm/MC/MCSchedule.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
#include <algorithm>
#include <cassert>
#include <cstdint>
#include <numeric>

using namespace llvm;

static cl::opt<bool> EnableSchedModel("schedmodel", cl::Hidden, cl::init(true),
                                      cl::desc("Use TargetSchedModel for latency lookup"));

static cl::opt<bool> EnableSchedItins("scheditins", cl::Hidden, cl::init(true),
                                      cl::desc("Use InstrItineraryData for latency lookup"));

bool TargetSchedModel::hasInstrSchedModel() const {
  return EnableSchedModel && SchedModel.hasInstrSchedModel();
}

bool TargetSchedModel::hasInstrItineraries() const {
  return EnableSchedItins && !InstrItins.isEmpty();
}

void TargetSchedModel::init(const TargetSubtargetInfo *TSInfo) {
  STI = TSInfo;
  SchedModel = TSInfo->getSchedModel();
  TII = TSInfo->getInstrInfo();
  STI->initInstrItins(InstrItins);

  unsigned NumRes = SchedModel.getNumProcResourceKinds();
  ResourceFactors.resize(NumRes);
  ResourceLCM = SchedModel.IssueWidth;
  for (unsigned Idx = 0; Idx < NumRes; ++Idx) {
    unsigned NumUnits = SchedModel.getProcResource(Idx)->NumUnits;
    if (NumUnits > 0)
      ResourceLCM = std::lcm(ResourceLCM, NumUnits);
  }
  MicroOpFactor = ResourceLCM / SchedModel.IssueWidth;
  for (unsigned Idx = 0; Idx < NumRes; ++Idx) {
    unsigned NumUnits = SchedModel.getProcResource(Idx)->NumUnits;
    ResourceFactors[Idx] = NumUnits ? (ResourceLCM / NumUnits) : 0;
  }
}
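
// Worked example of the normalization above (hypothetical numbers, not from
// any real subtarget): with IssueWidth = 4 and a resource with NumUnits = 2,
// ResourceLCM = lcm(4, 2) = 4, MicroOpFactor = 4 / 4 = 1, and that resource's
// factor is 4 / 2 = 2. In these normalized units, saturating the 2-unit
// resource for one cycle (2 * 2 = 4) costs the same as issuing a full group
// of 4 micro-ops (4 * 1 = 4), so counts from different resources are
// directly comparable.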

/// Returns true only if the instruction is specified as single issue.
bool TargetSchedModel::mustBeginGroup(const MachineInstr *MI,
                                      const MCSchedClassDesc *SC) const {
  if (hasInstrSchedModel()) {
    if (!SC)
      SC = resolveSchedClass(MI);
    if (SC->isValid())
      return SC->BeginGroup;
  }
  return false;
}

bool TargetSchedModel::mustEndGroup(const MachineInstr *MI,
                                    const MCSchedClassDesc *SC) const {
  if (hasInstrSchedModel()) {
    if (!SC)
      SC = resolveSchedClass(MI);
    if (SC->isValid())
      return SC->EndGroup;
  }
  return false;
}

unsigned TargetSchedModel::getNumMicroOps(const MachineInstr *MI,
                                          const MCSchedClassDesc *SC) const {
  if (hasInstrItineraries()) {
    int UOps = InstrItins.getNumMicroOps(MI->getDesc().getSchedClass());
    return (UOps >= 0) ? UOps : TII->getNumMicroOps(&InstrItins, *MI);
  }
  if (hasInstrSchedModel()) {
    if (!SC)
      SC = resolveSchedClass(MI);
    if (SC->isValid())
      return SC->NumMicroOps;
  }
  return MI->isTransient() ? 0 : 1;
}
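
// Example of the fallback order above (illustrative): an itinerary entry with
// a negative micro-op count defers to the TII->getNumMicroOps() hook; with a
// per-operand model, the resolved sched class supplies NumMicroOps; otherwise
// a transient instruction (e.g. a COPY expected to be eliminated) costs 0
// micro-ops and everything else costs 1.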

// The machine model may explicitly specify an invalid latency, which
// effectively means infinite latency. Since users of the TargetSchedule API
// don't know how to handle this, we convert it to a very large latency that is
// easy to distinguish when debugging the DAG but won't induce overflow.
static unsigned capLatency(int Cycles) {
  return Cycles >= 0 ? Cycles : 1000;
}
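
// E.g. capLatency(-1) yields 1000: large enough to stand out when dumping the
// DAG, yet far from unsigned overflow when summed along a dependence chain.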

/// Return the MCSchedClassDesc for this instruction. Some SchedClasses require
/// evaluation of predicates that depend on instruction operands or flags.
const MCSchedClassDesc *TargetSchedModel::
resolveSchedClass(const MachineInstr *MI) const {
  // Get the definition's scheduling class descriptor from this machine model.
  unsigned SchedClass = MI->getDesc().getSchedClass();
  const MCSchedClassDesc *SCDesc = SchedModel.getSchedClassDesc(SchedClass);
  if (!SCDesc->isValid())
    return SCDesc;

#ifndef NDEBUG
  unsigned NIter = 0;
#endif
  while (SCDesc->isVariant()) {
    assert(++NIter < 6 && "Variants are nested deeper than the magic number");

    SchedClass = STI->resolveSchedClass(SchedClass, MI, this);
    SCDesc = SchedModel.getSchedClassDesc(SchedClass);
  }
  return SCDesc;
}
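
// Usage sketch (not taken from a specific caller): clients resolve once and
// then query the concrete class, e.g.
//   const MCSchedClassDesc *SC = SchedModel.resolveSchedClass(&MI);
//   if (SC->isValid())
//     Latency = SchedModel.computeInstrLatency(*SC);
// The variant chain has already been walked here, so SC is never a variant.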

/// Find the def index of this operand. This index maps to the machine model
/// and is independent of use operands. Def operands may be reordered with
/// uses or merged with uses without affecting the def index (e.g. before/after
/// regalloc). However, an instruction's def operands must never be reordered
/// with respect to each other.
static unsigned findDefIdx(const MachineInstr *MI, unsigned DefOperIdx) {
  unsigned DefIdx = 0;
  for (unsigned i = 0; i != DefOperIdx; ++i) {
    const MachineOperand &MO = MI->getOperand(i);
    if (MO.isReg() && MO.isDef())
      ++DefIdx;
  }
  return DefIdx;
}
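
// Worked example (hypothetical operand list): for an MI with operands
// (def r0, use r1, def r2), findDefIdx(MI, 2) counts only operand 0 (a reg
// def) while scanning and returns 1, i.e. the machine model's second write.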

/// Find the use index of this operand. This is independent of the
/// instruction's def operands.
///
/// Note that uses are not determined by the operand's isUse property, which
/// is simply the inverse of isDef. Here we consider any readsReg operand to be
/// a "use". The machine model allows an operand to be both a Def and Use.
static unsigned findUseIdx(const MachineInstr *MI, unsigned UseOperIdx) {
  unsigned UseIdx = 0;
  for (unsigned i = 0; i != UseOperIdx; ++i) {
    const MachineOperand &MO = MI->getOperand(i);
    if (MO.isReg() && MO.readsReg() && !MO.isDef())
      ++UseIdx;
  }
  return UseIdx;
}
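
// Analogous example: for an MI with operands (def r0, use r1, use r2),
// findUseIdx(MI, 2) counts only operand 1 (reads a register and is not a
// def) and returns 1, i.e. the machine model's second read.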

// Top-level API for clients that know the operand indices.
unsigned TargetSchedModel::computeOperandLatency(
    const MachineInstr *DefMI, unsigned DefOperIdx,
    const MachineInstr *UseMI, unsigned UseOperIdx) const {

  if (!hasInstrSchedModel() && !hasInstrItineraries())
    return TII->defaultDefLatency(SchedModel, *DefMI);

  if (hasInstrItineraries()) {
    int OperLatency = 0;
    if (UseMI) {
      OperLatency = TII->getOperandLatency(&InstrItins, *DefMI, DefOperIdx,
                                           *UseMI, UseOperIdx);
    } else {
      unsigned DefClass = DefMI->getDesc().getSchedClass();
      OperLatency = InstrItins.getOperandCycle(DefClass, DefOperIdx);
    }
    if (OperLatency >= 0)
      return OperLatency;

    // No operand latency was found.
    unsigned InstrLatency = TII->getInstrLatency(&InstrItins, *DefMI);

    // Expected latency is the max of the stage latency and itinerary props.
    // Rather than directly querying InstrItins stage latency, we call a TII
    // hook to allow subtargets to specialize latency. This hook is only
    // applicable to the InstrItins model. InstrSchedModel should model all
    // special cases without TII hooks.
    InstrLatency =
        std::max(InstrLatency, TII->defaultDefLatency(SchedModel, *DefMI));
    return InstrLatency;
  }
  // hasInstrSchedModel()
  const MCSchedClassDesc *SCDesc = resolveSchedClass(DefMI);
  unsigned DefIdx = findDefIdx(DefMI, DefOperIdx);
  if (DefIdx < SCDesc->NumWriteLatencyEntries) {
    // Lookup the definition's write latency in SubtargetInfo.
    const MCWriteLatencyEntry *WLEntry =
        STI->getWriteLatencyEntry(SCDesc, DefIdx);
    unsigned WriteID = WLEntry->WriteResourceID;
    unsigned Latency = capLatency(WLEntry->Cycles);
    if (!UseMI)
      return Latency;

    // Lookup the use's latency adjustment in SubtargetInfo.
    const MCSchedClassDesc *UseDesc = resolveSchedClass(UseMI);
    if (UseDesc->NumReadAdvanceEntries == 0)
      return Latency;
    unsigned UseIdx = findUseIdx(UseMI, UseOperIdx);
    int Advance = STI->getReadAdvanceCycles(UseDesc, UseIdx, WriteID);
    if (Advance > 0 && (unsigned)Advance > Latency) // unsigned wrap
      return 0;
    return Latency - Advance;
  }
  // If DefIdx does not exist in the model (e.g. implicit defs), then return
  // unit latency (defaultDefLatency may be too conservative).
#ifndef NDEBUG
  if (SCDesc->isValid() && !DefMI->getOperand(DefOperIdx).isImplicit() &&
      !DefMI->getDesc().operands()[DefOperIdx].isOptionalDef() &&
      SchedModel.isComplete()) {
    errs() << "DefIdx " << DefIdx << " exceeds machine model writes for "
           << *DefMI << " (Try with MCSchedModel.CompleteModel set to false)";
    llvm_unreachable("incomplete machine model");
  }
#endif
  // FIXME: Automatically giving all implicit defs defaultDefLatency is
  // undesirable. We should only do it for defs that are known to the MC
  // desc like flags. Truly implicit defs should get 1 cycle latency.
  return DefMI->isTransient() ? 0 : TII->defaultDefLatency(SchedModel, *DefMI);
}
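
// Worked ReadAdvance example (hypothetical cycle counts): if the def's write
// latency entry reports 4 cycles and the use's class advertises a ReadAdvance
// of 3 against that WriteResourceID, the effective operand latency is
// 4 - 3 = 1. An advance of 5 would wrap the unsigned subtraction, which is
// why the code above clamps the result to 0.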

unsigned
TargetSchedModel::computeInstrLatency(const MCSchedClassDesc &SCDesc) const {
  return capLatency(MCSchedModel::computeInstrLatency(*STI, SCDesc));
}

unsigned TargetSchedModel::computeInstrLatency(unsigned Opcode) const {
  assert(hasInstrSchedModel() && "Only call this function with a SchedModel");
  unsigned SCIdx = TII->get(Opcode).getSchedClass();
  return capLatency(SchedModel.computeInstrLatency(*STI, SCIdx));
}

unsigned TargetSchedModel::computeInstrLatency(const MCInst &Inst) const {
  if (hasInstrSchedModel())
    return capLatency(SchedModel.computeInstrLatency(*STI, *TII, Inst));
  return computeInstrLatency(Inst.getOpcode());
}

unsigned
TargetSchedModel::computeInstrLatency(const MachineInstr *MI,
                                      bool UseDefaultDefLatency) const {
  // For the itinerary model, fall back to the old subtarget hook.
  // Allow subtargets to compute Bundle latencies outside the machine model.
  if (hasInstrItineraries() || MI->isBundle() ||
      (!hasInstrSchedModel() && !UseDefaultDefLatency))
    return TII->getInstrLatency(&InstrItins, *MI);

  if (hasInstrSchedModel()) {
    const MCSchedClassDesc *SCDesc = resolveSchedClass(MI);
    if (SCDesc->isValid())
      return computeInstrLatency(*SCDesc);
  }
  return TII->defaultDefLatency(SchedModel, *MI);
}
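
// Usage sketch (hypothetical caller): schedulers typically just call
//   unsigned Lat = SchedModel.computeInstrLatency(&MI);
// and rely on the fallback order above; passing UseDefaultDefLatency = false
// returns the raw TII->getInstrLatency() answer when no per-operand model
// exists, instead of the conservative default.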

unsigned TargetSchedModel::
computeOutputLatency(const MachineInstr *DefMI, unsigned DefOperIdx,
                     const MachineInstr *DepMI) const {
  if (!SchedModel.isOutOfOrder())
    return 1;

  // Out-of-order processors can dispatch WAW dependencies in the same cycle.

  // Treat predication as a data dependency for out-of-order cpus. In-order
  // cpus do not need to treat predicated writes specially.
  //
  // TODO: The following hack exists because predication passes do not
  // correctly append imp-use operands, and readsReg() strangely returns false
  // for predicated defs.
  Register Reg = DefMI->getOperand(DefOperIdx).getReg();
  const MachineFunction &MF = *DefMI->getMF();
  const TargetRegisterInfo *TRI = MF.getSubtarget().getRegisterInfo();
  if (!DepMI->readsRegister(Reg, TRI) && TII->isPredicated(*DepMI))
    return computeInstrLatency(DefMI);

  // If we have a per operand scheduling model, check if this def is writing
  // an unbuffered resource. If so, it is treated like an in-order cpu.
  if (hasInstrSchedModel()) {
    const MCSchedClassDesc *SCDesc = resolveSchedClass(DefMI);
    if (SCDesc->isValid()) {
      for (const MCWriteProcResEntry *PRI = STI->getWriteProcResBegin(SCDesc),
                                     *PRE = STI->getWriteProcResEnd(SCDesc);
           PRI != PRE; ++PRI) {
        if (!SchedModel.getProcResource(PRI->ProcResourceIdx)->BufferSize)
          return 1;
      }
    }
  }
  return 0;
}
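
// Example (illustrative): on an out-of-order core, back-to-back writes to the
// same register normally report an output latency of 0 because the second
// write can be dispatched in the same cycle; a def that consumes an
// unbuffered (BufferSize == 0) resource gets the in-order answer of 1 cycle.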

double
TargetSchedModel::computeReciprocalThroughput(const MachineInstr *MI) const {
  if (hasInstrItineraries()) {
    unsigned SchedClass = MI->getDesc().getSchedClass();
    return MCSchedModel::getReciprocalThroughput(SchedClass,
                                                 *getInstrItineraries());
  }

  if (hasInstrSchedModel())
    return MCSchedModel::getReciprocalThroughput(*STI, *resolveSchedClass(MI));

  return 0.0;
}
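
// Interpretation note (made-up numbers): a reciprocal throughput of 0.5 means
// the core can sustain two such instructions per cycle, while 2.0 means one
// instruction every other cycle; 0.0 signals that no model data is available.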

double
TargetSchedModel::computeReciprocalThroughput(unsigned Opcode) const {
  unsigned SchedClass = TII->get(Opcode).getSchedClass();
  if (hasInstrItineraries())
    return MCSchedModel::getReciprocalThroughput(SchedClass,
                                                 *getInstrItineraries());
  if (hasInstrSchedModel()) {
    const MCSchedClassDesc &SCDesc = *SchedModel.getSchedClassDesc(SchedClass);
    if (SCDesc.isValid() && !SCDesc.isVariant())
      return MCSchedModel::getReciprocalThroughput(*STI, SCDesc);
  }

  return 0.0;
}

double
TargetSchedModel::computeReciprocalThroughput(const MCInst &MI) const {
  if (hasInstrSchedModel())
    return SchedModel.getReciprocalThroughput(*STI, *TII, MI);
  return computeReciprocalThroughput(MI.getOpcode());
}