1 //===-- SchedClassResolution.cpp --------------------------------*- C++ -*-===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8
#include "SchedClassResolution.h"
#include "BenchmarkResult.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/MC/MCAsmInfo.h"
#include "llvm/Support/FormatVariadic.h"
#include <cmath>
#include <limits>
#include <unordered_set>
#include <vector>
17
18 namespace llvm {
19 namespace exegesis {
20
21 // Return the non-redundant list of WriteProcRes used by the given sched class.
22 // The scheduling model for LLVM is such that each instruction has a certain
23 // number of uops which consume resources which are described by WriteProcRes
24 // entries. Each entry describe how many cycles are spent on a specific ProcRes
25 // kind.
26 // For example, an instruction might have 3 uOps, one dispatching on P0
27 // (ProcResIdx=1) and two on P06 (ProcResIdx = 7).
28 // Note that LLVM additionally denormalizes resource consumption to include
29 // usage of super resources by subresources. So in practice if there exists a
30 // P016 (ProcResIdx=10), then the cycles consumed by P0 are also consumed by
31 // P06 (ProcResIdx = 7) and P016 (ProcResIdx = 10), and the resources consumed
32 // by P06 are also consumed by P016. In the figure below, parenthesized cycles
33 // denote implied usage of superresources by subresources:
34 // P0 P06 P016
35 // uOp1 1 (1) (1)
36 // uOp2 1 (1)
37 // uOp3 1 (1)
38 // =============================
39 // 1 3 3
40 // Eventually we end up with three entries for the WriteProcRes of the
41 // instruction:
42 // {ProcResIdx=1, Cycles=1} // P0
43 // {ProcResIdx=7, Cycles=3} // P06
44 // {ProcResIdx=10, Cycles=3} // P016
45 //
46 // Note that in this case, P016 does not contribute any cycles, so it would
47 // be removed by this function.
48 // FIXME: Move this to MCSubtargetInfo and use it in llvm-mca.
49 static SmallVector<MCWriteProcResEntry, 8>
getNonRedundantWriteProcRes(const MCSchedClassDesc & SCDesc,const MCSubtargetInfo & STI)50 getNonRedundantWriteProcRes(const MCSchedClassDesc &SCDesc,
51 const MCSubtargetInfo &STI) {
52 SmallVector<MCWriteProcResEntry, 8> Result;
53 const auto &SM = STI.getSchedModel();
54 const unsigned NumProcRes = SM.getNumProcResourceKinds();
55
56 // This assumes that the ProcResDescs are sorted in topological order, which
57 // is guaranteed by the tablegen backend.
58 SmallVector<float, 32> ProcResUnitUsage(NumProcRes);
59 for (const auto *WPR = STI.getWriteProcResBegin(&SCDesc),
60 *const WPREnd = STI.getWriteProcResEnd(&SCDesc);
61 WPR != WPREnd; ++WPR) {
62 const MCProcResourceDesc *const ProcResDesc =
63 SM.getProcResource(WPR->ProcResourceIdx);
64 if (ProcResDesc->SubUnitsIdxBegin == nullptr) {
65 // This is a ProcResUnit.
66 Result.push_back({WPR->ProcResourceIdx, WPR->Cycles});
67 ProcResUnitUsage[WPR->ProcResourceIdx] += WPR->Cycles;
68 } else {
69 // This is a ProcResGroup. First see if it contributes any cycles or if
70 // it has cycles just from subunits.
71 float RemainingCycles = WPR->Cycles;
72 for (const auto *SubResIdx = ProcResDesc->SubUnitsIdxBegin;
73 SubResIdx != ProcResDesc->SubUnitsIdxBegin + ProcResDesc->NumUnits;
74 ++SubResIdx) {
75 RemainingCycles -= ProcResUnitUsage[*SubResIdx];
76 }
77 if (RemainingCycles < 0.01f) {
78 // The ProcResGroup contributes no cycles of its own.
79 continue;
80 }
81 // The ProcResGroup contributes `RemainingCycles` cycles of its own.
82 Result.push_back({WPR->ProcResourceIdx,
83 static_cast<uint16_t>(std::round(RemainingCycles))});
84 // Spread the remaining cycles over all subunits.
85 for (const auto *SubResIdx = ProcResDesc->SubUnitsIdxBegin;
86 SubResIdx != ProcResDesc->SubUnitsIdxBegin + ProcResDesc->NumUnits;
87 ++SubResIdx) {
88 ProcResUnitUsage[*SubResIdx] += RemainingCycles / ProcResDesc->NumUnits;
89 }
90 }
91 }
92 return Result;
93 }
94
95 // Distributes a pressure budget as evenly as possible on the provided subunits
96 // given the already existing port pressure distribution.
97 //
98 // The algorithm is as follows: while there is remaining pressure to
99 // distribute, find the subunits with minimal pressure, and distribute
100 // remaining pressure equally up to the pressure of the unit with
101 // second-to-minimal pressure.
102 // For example, let's assume we want to distribute 2*P1256
103 // (Subunits = [P1,P2,P5,P6]), and the starting DensePressure is:
104 // DensePressure = P0 P1 P2 P3 P4 P5 P6 P7
105 // 0.1 0.3 0.2 0.0 0.0 0.5 0.5 0.5
106 // RemainingPressure = 2.0
107 // We sort the subunits by pressure:
108 // Subunits = [(P2,p=0.2), (P1,p=0.3), (P5,p=0.5), (P6, p=0.5)]
109 // We'll first start by the subunits with minimal pressure, which are at
110 // the beginning of the sorted array. In this example there is one (P2).
111 // The subunit with second-to-minimal pressure is the next one in the
112 // array (P1). So we distribute 0.1 pressure to P2, and remove 0.1 cycles
113 // from the budget.
//   Subunits = [(P2,p=0.3), (P1,p=0.3), (P5,p=0.5), (P6,p=0.5)]
115 // RemainingPressure = 1.9
116 // We repeat this process: distribute 0.2 pressure on each of the minimal
117 // P2 and P1, decrease budget by 2*0.2:
//   Subunits = [(P2,p=0.5), (P1,p=0.5), (P5,p=0.5), (P6,p=0.5)]
119 // RemainingPressure = 1.5
120 // There are no second-to-minimal subunits so we just share the remaining
121 // budget (1.5 cycles) equally:
//   Subunits = [(P2,p=0.875), (P1,p=0.875), (P5,p=0.875), (P6,p=0.875)]
123 // RemainingPressure = 0.0
124 // We stop as there is no remaining budget to distribute.
distributePressure(float RemainingPressure,SmallVector<uint16_t,32> Subunits,SmallVector<float,32> & DensePressure)125 static void distributePressure(float RemainingPressure,
126 SmallVector<uint16_t, 32> Subunits,
127 SmallVector<float, 32> &DensePressure) {
128 // Find the number of subunits with minimal pressure (they are at the
129 // front).
130 sort(Subunits, [&DensePressure](const uint16_t A, const uint16_t B) {
131 return DensePressure[A] < DensePressure[B];
132 });
133 const auto getPressureForSubunit = [&DensePressure,
134 &Subunits](size_t I) -> float & {
135 return DensePressure[Subunits[I]];
136 };
137 size_t NumMinimalSU = 1;
138 while (NumMinimalSU < Subunits.size() &&
139 getPressureForSubunit(NumMinimalSU) == getPressureForSubunit(0)) {
140 ++NumMinimalSU;
141 }
142 while (RemainingPressure > 0.0f) {
143 if (NumMinimalSU == Subunits.size()) {
144 // All units are minimal, just distribute evenly and be done.
145 for (size_t I = 0; I < NumMinimalSU; ++I) {
146 getPressureForSubunit(I) += RemainingPressure / NumMinimalSU;
147 }
148 return;
149 }
150 // Distribute the remaining pressure equally.
151 const float MinimalPressure = getPressureForSubunit(NumMinimalSU - 1);
152 const float SecondToMinimalPressure = getPressureForSubunit(NumMinimalSU);
153 assert(MinimalPressure < SecondToMinimalPressure);
154 const float Increment = SecondToMinimalPressure - MinimalPressure;
155 if (RemainingPressure <= NumMinimalSU * Increment) {
156 // There is not enough remaining pressure.
157 for (size_t I = 0; I < NumMinimalSU; ++I) {
158 getPressureForSubunit(I) += RemainingPressure / NumMinimalSU;
159 }
160 return;
161 }
162 // Bump all minimal pressure subunits to `SecondToMinimalPressure`.
163 for (size_t I = 0; I < NumMinimalSU; ++I) {
164 getPressureForSubunit(I) = SecondToMinimalPressure;
165 RemainingPressure -= SecondToMinimalPressure;
166 }
167 while (NumMinimalSU < Subunits.size() &&
168 getPressureForSubunit(NumMinimalSU) == SecondToMinimalPressure) {
169 ++NumMinimalSU;
170 }
171 }
172 }
173
174 std::vector<std::pair<uint16_t, float>>
computeIdealizedProcResPressure(const MCSchedModel & SM,SmallVector<MCWriteProcResEntry,8> WPRS)175 computeIdealizedProcResPressure(const MCSchedModel &SM,
176 SmallVector<MCWriteProcResEntry, 8> WPRS) {
177 // DensePressure[I] is the port pressure for Proc Resource I.
178 SmallVector<float, 32> DensePressure(SM.getNumProcResourceKinds());
179 sort(WPRS, [](const MCWriteProcResEntry &A, const MCWriteProcResEntry &B) {
180 return A.ProcResourceIdx < B.ProcResourceIdx;
181 });
182 for (const MCWriteProcResEntry &WPR : WPRS) {
183 // Get units for the entry.
184 const MCProcResourceDesc *const ProcResDesc =
185 SM.getProcResource(WPR.ProcResourceIdx);
186 if (ProcResDesc->SubUnitsIdxBegin == nullptr) {
187 // This is a ProcResUnit.
188 DensePressure[WPR.ProcResourceIdx] += WPR.Cycles;
189 } else {
190 // This is a ProcResGroup.
191 SmallVector<uint16_t, 32> Subunits(ProcResDesc->SubUnitsIdxBegin,
192 ProcResDesc->SubUnitsIdxBegin +
193 ProcResDesc->NumUnits);
194 distributePressure(WPR.Cycles, Subunits, DensePressure);
195 }
196 }
197 // Turn dense pressure into sparse pressure by removing zero entries.
198 std::vector<std::pair<uint16_t, float>> Pressure;
199 for (unsigned I = 0, E = SM.getNumProcResourceKinds(); I < E; ++I) {
200 if (DensePressure[I] > 0.0f)
201 Pressure.emplace_back(I, DensePressure[I]);
202 }
203 return Pressure;
204 }
205
// Builds the resolved (non-variant) view of a sched class.
// `ResolvedSchedClassId` must already be resolved (see resolveSchedClassId);
// `WasVariant` records whether resolution went through a variant class.
// NOTE: the member-init list depends on declaration order in the header:
// NonRedundantWriteProcRes must be initialized before
// IdealizedProcResPressure, which consumes it — presumably the header
// declares them in that order; verify if reordering members.
// NOTE(review): `*SCDesc` is dereferenced in the init list before the
// null-tolerant assert below runs — looks like getSchedClassDesc is assumed
// to never return null here; confirm.
ResolvedSchedClass::ResolvedSchedClass(const MCSubtargetInfo &STI,
                                       unsigned ResolvedSchedClassId,
                                       bool WasVariant)
    : SchedClassId(ResolvedSchedClassId),
      SCDesc(STI.getSchedModel().getSchedClassDesc(ResolvedSchedClassId)),
      WasVariant(WasVariant),
      NonRedundantWriteProcRes(getNonRedundantWriteProcRes(*SCDesc, STI)),
      IdealizedProcResPressure(computeIdealizedProcResPressure(
          STI.getSchedModel(), NonRedundantWriteProcRes)) {
  assert((SCDesc == nullptr || !SCDesc->isVariant()) &&
         "ResolvedSchedClass should never be variant");
}
218
ResolveVariantSchedClassId(const MCSubtargetInfo & STI,unsigned SchedClassId,const MCInst & MCI)219 static unsigned ResolveVariantSchedClassId(const MCSubtargetInfo &STI,
220 unsigned SchedClassId,
221 const MCInst &MCI) {
222 const auto &SM = STI.getSchedModel();
223 while (SchedClassId && SM.getSchedClassDesc(SchedClassId)->isVariant())
224 SchedClassId =
225 STI.resolveVariantSchedClass(SchedClassId, &MCI, SM.getProcessorID());
226 return SchedClassId;
227 }
228
229 std::pair<unsigned /*SchedClassId*/, bool /*WasVariant*/>
resolveSchedClassId(const MCSubtargetInfo & SubtargetInfo,const MCInstrInfo & InstrInfo,const MCInst & MCI)230 ResolvedSchedClass::resolveSchedClassId(const MCSubtargetInfo &SubtargetInfo,
231 const MCInstrInfo &InstrInfo,
232 const MCInst &MCI) {
233 unsigned SchedClassId = InstrInfo.get(MCI.getOpcode()).getSchedClass();
234 const bool WasVariant = SchedClassId && SubtargetInfo.getSchedModel()
235 .getSchedClassDesc(SchedClassId)
236 ->isVariant();
237 SchedClassId = ResolveVariantSchedClassId(SubtargetInfo, SchedClassId, MCI);
238 return std::make_pair(SchedClassId, WasVariant);
239 }
240
241 // Returns a ProxResIdx by id or name.
findProcResIdx(const MCSubtargetInfo & STI,const StringRef NameOrId)242 static unsigned findProcResIdx(const MCSubtargetInfo &STI,
243 const StringRef NameOrId) {
244 // Interpret the key as an ProcResIdx.
245 unsigned ProcResIdx = 0;
246 if (to_integer(NameOrId, ProcResIdx, 10))
247 return ProcResIdx;
248 // Interpret the key as a ProcRes name.
249 const auto &SchedModel = STI.getSchedModel();
250 for (int I = 0, E = SchedModel.getNumProcResourceKinds(); I < E; ++I) {
251 if (NameOrId == SchedModel.getProcResource(I)->Name)
252 return I;
253 }
254 return 0;
255 }
256
// Builds the benchmark point predicted by the scheduling model for this
// sched class, shaped like the measured point (one BenchmarkMeasure per
// entry of `Representative`) so the two can be compared. Returns an empty
// vector on an unrecognized uops key.
std::vector<BenchmarkMeasure> ResolvedSchedClass::getAsPoint(
    InstructionBenchmark::ModeE Mode, const MCSubtargetInfo &STI,
    ArrayRef<PerInstructionStats> Representative) const {
  const size_t NumMeasurements = Representative.size();

  std::vector<BenchmarkMeasure> SchedClassPoint(NumMeasurements);

  if (Mode == InstructionBenchmark::Latency) {
    assert(NumMeasurements == 1 && "Latency is a single measure.");
    BenchmarkMeasure &LatencyMeasure = SchedClassPoint[0];

    // Predicted latency is the maximum over all write latency entries of
    // the sched class.
    LatencyMeasure.PerInstructionValue = 0.0;

    for (unsigned I = 0; I < SCDesc->NumWriteLatencyEntries; ++I) {
      const MCWriteLatencyEntry *const WLE =
          STI.getWriteLatencyEntry(SCDesc, I);
      LatencyMeasure.PerInstructionValue =
          std::max<double>(LatencyMeasure.PerInstructionValue, WLE->Cycles);
    }
  } else if (Mode == InstructionBenchmark::Uops) {
    // One measure per representative stat; each stat's key names either a
    // ProcRes (by idx or by name) or the special "NumMicroOps" counter.
    for (auto I : zip(SchedClassPoint, Representative)) {
      BenchmarkMeasure &Measure = std::get<0>(I);
      const PerInstructionStats &Stats = std::get<1>(I);

      StringRef Key = Stats.key();
      uint16_t ProcResIdx = findProcResIdx(STI, Key);
      if (ProcResIdx > 0) {
        // Find the idealized pressure on ProcResIdx `Key`; the sched class
        // exerts 0.0 pressure on resources absent from the sparse list.
        const auto ProcResPressureIt = std::find_if(
            IdealizedProcResPressure.begin(), IdealizedProcResPressure.end(),
            [ProcResIdx](const std::pair<uint16_t, float> &WPR) {
              return WPR.first == ProcResIdx;
            });
        Measure.PerInstructionValue =
            ProcResPressureIt == IdealizedProcResPressure.end()
                ? 0.0
                : ProcResPressureIt->second;
      } else if (Key == "NumMicroOps") {
        Measure.PerInstructionValue = SCDesc->NumMicroOps;
      } else {
        // Unknown key: report the problem and bail out with an empty point.
        errs() << "expected `key` to be either a ProcResIdx or a ProcRes "
                  "name, got "
               << Key << "\n";
        return {};
      }
    }
  } else if (Mode == InstructionBenchmark::InverseThroughput) {
    assert(NumMeasurements == 1 && "Inverse Throughput is a single measure.");
    BenchmarkMeasure &RThroughputMeasure = SchedClassPoint[0];

    RThroughputMeasure.PerInstructionValue =
        MCSchedModel::getReciprocalThroughput(STI, *SCDesc);
  } else {
    llvm_unreachable("unimplemented measurement matching mode");
  }

  return SchedClassPoint;
}
316
317 } // namespace exegesis
318 } // namespace llvm
319