1 //===-- RegAllocPBQP.h ------------------------------------------*- C++ -*-===//
2 //
3 // The LLVM Compiler Infrastructure
4 //
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
7 //
8 //===----------------------------------------------------------------------===//
9 //
10 // This file defines the PBQPBuilder interface, for classes which build PBQP
11 // instances to represent register allocation problems, and the RegAllocPBQP
12 // interface.
13 //
14 //===----------------------------------------------------------------------===//
15
16 #ifndef LLVM_CODEGEN_REGALLOCPBQP_H
17 #define LLVM_CODEGEN_REGALLOCPBQP_H
18
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/PBQP/CostAllocator.h"
#include "llvm/CodeGen/PBQP/ReductionRules.h"
#include "llvm/CodeGen/PBQPRAConstraint.h"
#include "llvm/Support/ErrorHandling.h"
#include <algorithm>
#include <limits>
#include <memory>
#include <set>
#include <utility>
#include <vector>
24
25 namespace llvm {
26 namespace PBQP {
27 namespace RegAlloc {
28
/// @brief Spill option index.
///
/// By convention the spill option occupies index 0 of every node's cost
/// vector; register options start at index 1.
inline unsigned getSpillOptionIdx() {
  return 0;
}
31
32 /// \brief Metadata to speed allocatability test.
33 ///
34 /// Keeps track of the number of infinities in each row and column.
35 class MatrixMetadata {
36 private:
37 MatrixMetadata(const MatrixMetadata&);
38 void operator=(const MatrixMetadata&);
39 public:
MatrixMetadata(const Matrix & M)40 MatrixMetadata(const Matrix& M)
41 : WorstRow(0), WorstCol(0),
42 UnsafeRows(new bool[M.getRows() - 1]()),
43 UnsafeCols(new bool[M.getCols() - 1]()) {
44
45 unsigned* ColCounts = new unsigned[M.getCols() - 1]();
46
47 for (unsigned i = 1; i < M.getRows(); ++i) {
48 unsigned RowCount = 0;
49 for (unsigned j = 1; j < M.getCols(); ++j) {
50 if (M[i][j] == std::numeric_limits<PBQPNum>::infinity()) {
51 ++RowCount;
52 ++ColCounts[j - 1];
53 UnsafeRows[i - 1] = true;
54 UnsafeCols[j - 1] = true;
55 }
56 }
57 WorstRow = std::max(WorstRow, RowCount);
58 }
59 unsigned WorstColCountForCurRow =
60 *std::max_element(ColCounts, ColCounts + M.getCols() - 1);
61 WorstCol = std::max(WorstCol, WorstColCountForCurRow);
62 delete[] ColCounts;
63 }
64
getWorstRow()65 unsigned getWorstRow() const { return WorstRow; }
getWorstCol()66 unsigned getWorstCol() const { return WorstCol; }
getUnsafeRows()67 const bool* getUnsafeRows() const { return UnsafeRows.get(); }
getUnsafeCols()68 const bool* getUnsafeCols() const { return UnsafeCols.get(); }
69
70 private:
71 unsigned WorstRow, WorstCol;
72 std::unique_ptr<bool[]> UnsafeRows;
73 std::unique_ptr<bool[]> UnsafeCols;
74 };
75
76 /// \brief Holds a vector of the allowed physical regs for a vreg.
77 class AllowedRegVector {
78 friend hash_code hash_value(const AllowedRegVector &);
79 public:
80
AllowedRegVector()81 AllowedRegVector() : NumOpts(0), Opts(nullptr) {}
82
AllowedRegVector(const std::vector<unsigned> & OptVec)83 AllowedRegVector(const std::vector<unsigned> &OptVec)
84 : NumOpts(OptVec.size()), Opts(new unsigned[NumOpts]) {
85 std::copy(OptVec.begin(), OptVec.end(), Opts.get());
86 }
87
AllowedRegVector(const AllowedRegVector & Other)88 AllowedRegVector(const AllowedRegVector &Other)
89 : NumOpts(Other.NumOpts), Opts(new unsigned[NumOpts]) {
90 std::copy(Other.Opts.get(), Other.Opts.get() + NumOpts, Opts.get());
91 }
92
AllowedRegVector(AllowedRegVector && Other)93 AllowedRegVector(AllowedRegVector &&Other)
94 : NumOpts(std::move(Other.NumOpts)), Opts(std::move(Other.Opts)) {}
95
96 AllowedRegVector& operator=(const AllowedRegVector &Other) {
97 NumOpts = Other.NumOpts;
98 Opts.reset(new unsigned[NumOpts]);
99 std::copy(Other.Opts.get(), Other.Opts.get() + NumOpts, Opts.get());
100 return *this;
101 }
102
103 AllowedRegVector& operator=(AllowedRegVector &&Other) {
104 NumOpts = std::move(Other.NumOpts);
105 Opts = std::move(Other.Opts);
106 return *this;
107 }
108
size()109 unsigned size() const { return NumOpts; }
110 unsigned operator[](size_t I) const { return Opts[I]; }
111
112 bool operator==(const AllowedRegVector &Other) const {
113 if (NumOpts != Other.NumOpts)
114 return false;
115 return std::equal(Opts.get(), Opts.get() + NumOpts, Other.Opts.get());
116 }
117
118 bool operator!=(const AllowedRegVector &Other) const {
119 return !(*this == Other);
120 }
121
122 private:
123 unsigned NumOpts;
124 std::unique_ptr<unsigned[]> Opts;
125 };
126
hash_value(const AllowedRegVector & OptRegs)127 inline hash_code hash_value(const AllowedRegVector &OptRegs) {
128 unsigned *OStart = OptRegs.Opts.get();
129 unsigned *OEnd = OptRegs.Opts.get() + OptRegs.NumOpts;
130 return hash_combine(OptRegs.NumOpts,
131 hash_combine_range(OStart, OEnd));
132 }
133
134 /// \brief Holds graph-level metadata relevent to PBQP RA problems.
135 class GraphMetadata {
136 private:
137 typedef ValuePool<AllowedRegVector> AllowedRegVecPool;
138 public:
139
140 typedef AllowedRegVecPool::PoolRef AllowedRegVecRef;
141
GraphMetadata(MachineFunction & MF,LiveIntervals & LIS,MachineBlockFrequencyInfo & MBFI)142 GraphMetadata(MachineFunction &MF,
143 LiveIntervals &LIS,
144 MachineBlockFrequencyInfo &MBFI)
145 : MF(MF), LIS(LIS), MBFI(MBFI) {}
146
147 MachineFunction &MF;
148 LiveIntervals &LIS;
149 MachineBlockFrequencyInfo &MBFI;
150
setNodeIdForVReg(unsigned VReg,GraphBase::NodeId NId)151 void setNodeIdForVReg(unsigned VReg, GraphBase::NodeId NId) {
152 VRegToNodeId[VReg] = NId;
153 }
154
getNodeIdForVReg(unsigned VReg)155 GraphBase::NodeId getNodeIdForVReg(unsigned VReg) const {
156 auto VRegItr = VRegToNodeId.find(VReg);
157 if (VRegItr == VRegToNodeId.end())
158 return GraphBase::invalidNodeId();
159 return VRegItr->second;
160 }
161
eraseNodeIdForVReg(unsigned VReg)162 void eraseNodeIdForVReg(unsigned VReg) {
163 VRegToNodeId.erase(VReg);
164 }
165
getAllowedRegs(AllowedRegVector Allowed)166 AllowedRegVecRef getAllowedRegs(AllowedRegVector Allowed) {
167 return AllowedRegVecs.getValue(std::move(Allowed));
168 }
169
170 private:
171 DenseMap<unsigned, GraphBase::NodeId> VRegToNodeId;
172 AllowedRegVecPool AllowedRegVecs;
173 };
174
175 /// \brief Holds solver state and other metadata relevant to each PBQP RA node.
176 class NodeMetadata {
177 public:
178 typedef RegAlloc::AllowedRegVector AllowedRegVector;
179
180 typedef enum { Unprocessed,
181 OptimallyReducible,
182 ConservativelyAllocatable,
183 NotProvablyAllocatable } ReductionState;
184
NodeMetadata()185 NodeMetadata()
186 : RS(Unprocessed), NumOpts(0), DeniedOpts(0), OptUnsafeEdges(nullptr),
187 VReg(0) {}
188
189 // FIXME: Re-implementing default behavior to work around MSVC. Remove once
190 // MSVC synthesizes move constructors properly.
NodeMetadata(const NodeMetadata & Other)191 NodeMetadata(const NodeMetadata &Other)
192 : RS(Other.RS), NumOpts(Other.NumOpts), DeniedOpts(Other.DeniedOpts),
193 OptUnsafeEdges(new unsigned[NumOpts]), VReg(Other.VReg),
194 AllowedRegs(Other.AllowedRegs) {
195 if (NumOpts > 0) {
196 std::copy(&Other.OptUnsafeEdges[0], &Other.OptUnsafeEdges[NumOpts],
197 &OptUnsafeEdges[0]);
198 }
199 }
200
201 // FIXME: Re-implementing default behavior to work around MSVC. Remove once
202 // MSVC synthesizes move constructors properly.
NodeMetadata(NodeMetadata && Other)203 NodeMetadata(NodeMetadata &&Other)
204 : RS(Other.RS), NumOpts(Other.NumOpts), DeniedOpts(Other.DeniedOpts),
205 OptUnsafeEdges(std::move(Other.OptUnsafeEdges)), VReg(Other.VReg),
206 AllowedRegs(std::move(Other.AllowedRegs)) {}
207
208 // FIXME: Re-implementing default behavior to work around MSVC. Remove once
209 // MSVC synthesizes move constructors properly.
210 NodeMetadata& operator=(const NodeMetadata &Other) {
211 RS = Other.RS;
212 NumOpts = Other.NumOpts;
213 DeniedOpts = Other.DeniedOpts;
214 OptUnsafeEdges.reset(new unsigned[NumOpts]);
215 std::copy(Other.OptUnsafeEdges.get(), Other.OptUnsafeEdges.get() + NumOpts,
216 OptUnsafeEdges.get());
217 VReg = Other.VReg;
218 AllowedRegs = Other.AllowedRegs;
219 return *this;
220 }
221
222 // FIXME: Re-implementing default behavior to work around MSVC. Remove once
223 // MSVC synthesizes move constructors properly.
224 NodeMetadata& operator=(NodeMetadata &&Other) {
225 RS = Other.RS;
226 NumOpts = Other.NumOpts;
227 DeniedOpts = Other.DeniedOpts;
228 OptUnsafeEdges = std::move(Other.OptUnsafeEdges);
229 VReg = Other.VReg;
230 AllowedRegs = std::move(Other.AllowedRegs);
231 return *this;
232 }
233
setVReg(unsigned VReg)234 void setVReg(unsigned VReg) { this->VReg = VReg; }
getVReg()235 unsigned getVReg() const { return VReg; }
236
setAllowedRegs(GraphMetadata::AllowedRegVecRef AllowedRegs)237 void setAllowedRegs(GraphMetadata::AllowedRegVecRef AllowedRegs) {
238 this->AllowedRegs = std::move(AllowedRegs);
239 }
getAllowedRegs()240 const AllowedRegVector& getAllowedRegs() const { return *AllowedRegs; }
241
setup(const Vector & Costs)242 void setup(const Vector& Costs) {
243 NumOpts = Costs.getLength() - 1;
244 OptUnsafeEdges = std::unique_ptr<unsigned[]>(new unsigned[NumOpts]());
245 }
246
getReductionState()247 ReductionState getReductionState() const { return RS; }
setReductionState(ReductionState RS)248 void setReductionState(ReductionState RS) { this->RS = RS; }
249
handleAddEdge(const MatrixMetadata & MD,bool Transpose)250 void handleAddEdge(const MatrixMetadata& MD, bool Transpose) {
251 DeniedOpts += Transpose ? MD.getWorstRow() : MD.getWorstCol();
252 const bool* UnsafeOpts =
253 Transpose ? MD.getUnsafeCols() : MD.getUnsafeRows();
254 for (unsigned i = 0; i < NumOpts; ++i)
255 OptUnsafeEdges[i] += UnsafeOpts[i];
256 }
257
handleRemoveEdge(const MatrixMetadata & MD,bool Transpose)258 void handleRemoveEdge(const MatrixMetadata& MD, bool Transpose) {
259 DeniedOpts -= Transpose ? MD.getWorstRow() : MD.getWorstCol();
260 const bool* UnsafeOpts =
261 Transpose ? MD.getUnsafeCols() : MD.getUnsafeRows();
262 for (unsigned i = 0; i < NumOpts; ++i)
263 OptUnsafeEdges[i] -= UnsafeOpts[i];
264 }
265
isConservativelyAllocatable()266 bool isConservativelyAllocatable() const {
267 return (DeniedOpts < NumOpts) ||
268 (std::find(&OptUnsafeEdges[0], &OptUnsafeEdges[NumOpts], 0) !=
269 &OptUnsafeEdges[NumOpts]);
270 }
271
272 private:
273 ReductionState RS;
274 unsigned NumOpts;
275 unsigned DeniedOpts;
276 std::unique_ptr<unsigned[]> OptUnsafeEdges;
277 unsigned VReg;
278 GraphMetadata::AllowedRegVecRef AllowedRegs;
279 };
280
/// \brief PBQP solver specialized for register allocation.
///
/// Implements the reduce/backpropagate heuristic: nodes are bucketed into
/// worklists by reduction state; degree-0/1/2 nodes are eliminated optimally
/// (R0/R1/R2), conservatively allocatable nodes are deferred safely, and the
/// remainder are picked by lowest spill-cost:degree ratio.
class RegAllocSolverImpl {
private:
  typedef MDMatrix<MatrixMetadata> RAMatrix;
public:
  typedef PBQP::Vector RawVector;
  typedef PBQP::Matrix RawMatrix;
  typedef PBQP::Vector Vector;
  typedef RAMatrix Matrix;
  typedef PBQP::PoolCostAllocator<Vector, Matrix> CostAllocator;

  typedef GraphBase::NodeId NodeId;
  typedef GraphBase::EdgeId EdgeId;

  typedef RegAlloc::NodeMetadata NodeMetadata;
  struct EdgeMetadata { };
  typedef RegAlloc::GraphMetadata GraphMetadata;

  typedef PBQP::Graph<RegAllocSolverImpl> Graph;

  RegAllocSolverImpl(Graph &G) : G(G) {}

  /// \brief Reduce the graph to a node ordering, then backpropagate to pick
  /// a selection for every node. Registers this solver with the graph for
  /// the duration so mutation callbacks below fire.
  Solution solve() {
    G.setSolver(*this);
    Solution S;
    setup();
    S = backpropagate(G, reduce());
    G.unsetSolver();
    return S;
  }

  // Graph callback: size the new node's metadata from its cost vector.
  void handleAddNode(NodeId NId) {
    G.getNodeMetadata(NId).setup(G.getNodeCosts(NId));
  }
  // No bookkeeping needed on node removal or node-cost updates.
  void handleRemoveNode(NodeId NId) {}
  void handleSetNodeCosts(NodeId NId, const Vector& newCosts) {}

  // Graph callback: update both endpoints' denied/unsafe counters.
  void handleAddEdge(EdgeId EId) {
    handleReconnectEdge(EId, G.getEdgeNode1Id(EId));
    handleReconnectEdge(EId, G.getEdgeNode2Id(EId));
  }

  // Graph callback: undo both endpoints' denied/unsafe counters.
  void handleRemoveEdge(EdgeId EId) {
    handleDisconnectEdge(EId, G.getEdgeNode1Id(EId));
    handleDisconnectEdge(EId, G.getEdgeNode2Id(EId));
  }

  // Remove EId's contribution to NId's metadata and, if NId's state
  // improved as a result, migrate it to a better worklist.
  void handleDisconnectEdge(EdgeId EId, NodeId NId) {
    NodeMetadata& NMd = G.getNodeMetadata(NId);
    const MatrixMetadata& MMd = G.getEdgeCosts(EId).getMetadata();
    // Transpose flag: true when NId is the edge's second (column) node.
    NMd.handleRemoveEdge(MMd, NId == G.getEdgeNode2Id(EId));
    if (G.getNodeDegree(NId) == 3) {
      // Degree dropping from 4 to 3: this node is becoming optimally
      // reducible once this disconnect completes.
      moveToOptimallyReducibleNodes(NId);
    } else if (NMd.getReductionState() ==
               NodeMetadata::NotProvablyAllocatable &&
               NMd.isConservativelyAllocatable()) {
      // This node just became conservatively allocatable.
      moveToConservativelyAllocatableNodes(NId);
    }
  }

  // Add EId's contribution to NId's metadata (no worklist migration here;
  // callers re-run setup/queries as needed).
  void handleReconnectEdge(EdgeId EId, NodeId NId) {
    NodeMetadata& NMd = G.getNodeMetadata(NId);
    const MatrixMetadata& MMd = G.getEdgeCosts(EId).getMetadata();
    NMd.handleAddEdge(MMd, NId == G.getEdgeNode2Id(EId));
  }

  // Graph callback for replacing an edge's cost matrix: retract the old
  // matrix's contribution, then apply the new one to both endpoints.
  void handleSetEdgeCosts(EdgeId EId, const Matrix& NewCosts) {
    handleRemoveEdge(EId);

    NodeId N1Id = G.getEdgeNode1Id(EId);
    NodeId N2Id = G.getEdgeNode2Id(EId);
    NodeMetadata& N1Md = G.getNodeMetadata(N1Id);
    NodeMetadata& N2Md = G.getNodeMetadata(N2Id);
    const MatrixMetadata& MMd = NewCosts.getMetadata();
    // Transpose is false for node 1 and true for node 2 (the comparisons
    // spell that out explicitly).
    N1Md.handleAddEdge(MMd, N1Id != G.getEdgeNode1Id(EId));
    N2Md.handleAddEdge(MMd, N2Id != G.getEdgeNode1Id(EId));
  }

private:

  // Remove NId from whichever worklist its reduction state says it is on.
  // Unprocessed nodes are on no list.
  void removeFromCurrentSet(NodeId NId) {
    switch (G.getNodeMetadata(NId).getReductionState()) {
      case NodeMetadata::Unprocessed: break;
      case NodeMetadata::OptimallyReducible:
        assert(OptimallyReducibleNodes.find(NId) !=
               OptimallyReducibleNodes.end() &&
               "Node not in optimally reducible set.");
        OptimallyReducibleNodes.erase(NId);
        break;
      case NodeMetadata::ConservativelyAllocatable:
        assert(ConservativelyAllocatableNodes.find(NId) !=
               ConservativelyAllocatableNodes.end() &&
               "Node not in conservatively allocatable set.");
        ConservativelyAllocatableNodes.erase(NId);
        break;
      case NodeMetadata::NotProvablyAllocatable:
        assert(NotProvablyAllocatableNodes.find(NId) !=
               NotProvablyAllocatableNodes.end() &&
               "Node not in not-provably-allocatable set.");
        NotProvablyAllocatableNodes.erase(NId);
        break;
    }
  }

  // Worklist transitions: each removes NId from its current set, inserts it
  // into the target set, and updates the node's recorded state to match.
  void moveToOptimallyReducibleNodes(NodeId NId) {
    removeFromCurrentSet(NId);
    OptimallyReducibleNodes.insert(NId);
    G.getNodeMetadata(NId).setReductionState(
      NodeMetadata::OptimallyReducible);
  }

  void moveToConservativelyAllocatableNodes(NodeId NId) {
    removeFromCurrentSet(NId);
    ConservativelyAllocatableNodes.insert(NId);
    G.getNodeMetadata(NId).setReductionState(
      NodeMetadata::ConservativelyAllocatable);
  }

  void moveToNotProvablyAllocatableNodes(NodeId NId) {
    removeFromCurrentSet(NId);
    NotProvablyAllocatableNodes.insert(NId);
    G.getNodeMetadata(NId).setReductionState(
      NodeMetadata::NotProvablyAllocatable);
  }

  // Initial bucketing of every node into the three worklists.
  void setup() {
    // Set up worklists.
    for (auto NId : G.nodeIds()) {
      if (G.getNodeDegree(NId) < 3)
        moveToOptimallyReducibleNodes(NId);
      else if (G.getNodeMetadata(NId).isConservativelyAllocatable())
        moveToConservativelyAllocatableNodes(NId);
      else
        moveToNotProvablyAllocatableNodes(NId);
    }
  }

  // Compute a reduction order for the graph by iteratively applying PBQP
  // reduction rules. Locally optimal rules are applied whenever possible (R0,
  // R1, R2). If no locally-optimal rules apply then any conservatively
  // allocatable node is reduced. Finally, if no conservatively allocatable
  // node exists then the node with the lowest spill-cost:degree ratio is
  // selected.
  std::vector<GraphBase::NodeId> reduce() {
    assert(!G.empty() && "Cannot reduce empty graph.");

    typedef GraphBase::NodeId NodeId;
    std::vector<NodeId> NodeStack;

    // Consume worklists. Each iteration pushes exactly one node onto the
    // stack; disconnecting a node may refill the higher-priority lists, so
    // re-check them from the top every time.
    while (true) {
      if (!OptimallyReducibleNodes.empty()) {
        NodeSet::iterator NItr = OptimallyReducibleNodes.begin();
        NodeId NId = *NItr;
        OptimallyReducibleNodes.erase(NItr);
        NodeStack.push_back(NId);
        switch (G.getNodeDegree(NId)) {
          case 0:
            break;
          case 1:
            applyR1(G, NId);
            break;
          case 2:
            applyR2(G, NId);
            break;
          default: llvm_unreachable("Not an optimally reducible node.");
        }
      } else if (!ConservativelyAllocatableNodes.empty()) {
        // Conservatively allocatable nodes will never spill. For now just
        // take the first node in the set and push it on the stack. When we
        // start optimizing more heavily for register preferencing, it may
        // be better to push nodes with lower 'expected' or worst-case
        // register costs first (since early nodes are the most
        // constrained).
        NodeSet::iterator NItr = ConservativelyAllocatableNodes.begin();
        NodeId NId = *NItr;
        ConservativelyAllocatableNodes.erase(NItr);
        NodeStack.push_back(NId);
        G.disconnectAllNeighborsFromNode(NId);

      } else if (!NotProvablyAllocatableNodes.empty()) {
        // Heuristic spill choice: the node with the lowest
        // spill-cost:degree ratio.
        NodeSet::iterator NItr =
          std::min_element(NotProvablyAllocatableNodes.begin(),
                           NotProvablyAllocatableNodes.end(),
                           SpillCostComparator(G));
        NodeId NId = *NItr;
        NotProvablyAllocatableNodes.erase(NItr);
        NodeStack.push_back(NId);
        G.disconnectAllNeighborsFromNode(NId);
      } else
        break;
    }

    return NodeStack;
  }

  /// Orders nodes by spill cost (cost of the spill option, index 0) divided
  /// by node degree: cheaper-to-spill, highly-connected nodes come first.
  class SpillCostComparator {
  public:
    SpillCostComparator(const Graph& G) : G(G) {}
    bool operator()(NodeId N1Id, NodeId N2Id) {
      PBQPNum N1SC = G.getNodeCosts(N1Id)[0] / G.getNodeDegree(N1Id);
      PBQPNum N2SC = G.getNodeCosts(N2Id)[0] / G.getNodeDegree(N2Id);
      return N1SC < N2SC;
    }
  private:
    const Graph& G;
  };

  Graph& G;
  typedef std::set<NodeId> NodeSet;
  NodeSet OptimallyReducibleNodes;
  NodeSet ConservativelyAllocatableNodes;
  NodeSet NotProvablyAllocatableNodes;
};
496
497 class PBQPRAGraph : public PBQP::Graph<RegAllocSolverImpl> {
498 private:
499 typedef PBQP::Graph<RegAllocSolverImpl> BaseT;
500 public:
PBQPRAGraph(GraphMetadata Metadata)501 PBQPRAGraph(GraphMetadata Metadata) : BaseT(Metadata) {}
502 };
503
solve(PBQPRAGraph & G)504 inline Solution solve(PBQPRAGraph& G) {
505 if (G.empty())
506 return Solution();
507 RegAllocSolverImpl RegAllocSolver(G);
508 return RegAllocSolver.solve();
509 }
510
511 } // namespace RegAlloc
512 } // namespace PBQP
513
514 /// @brief Create a PBQP register allocator instance.
515 FunctionPass *
516 createPBQPRegisterAllocator(char *customPassID = nullptr);
517
518 } // namespace llvm
519
520 #endif /* LLVM_CODEGEN_REGALLOCPBQP_H */
521