//===- RegAllocGreedy.cpp - greedy register allocator --------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines the RAGreedy function pass for register allocation in
// optimized builds.
//
//===----------------------------------------------------------------------===//

14 #include "AllocationOrder.h"
15 #include "InterferenceCache.h"
16 #include "LiveDebugVariables.h"
17 #include "RegAllocBase.h"
18 #include "SpillPlacement.h"
19 #include "SplitKit.h"
20 #include "llvm/ADT/ArrayRef.h"
21 #include "llvm/ADT/BitVector.h"
22 #include "llvm/ADT/DenseMap.h"
23 #include "llvm/ADT/IndexedMap.h"
24 #include "llvm/ADT/MapVector.h"
25 #include "llvm/ADT/SetVector.h"
26 #include "llvm/ADT/SmallPtrSet.h"
27 #include "llvm/ADT/SmallSet.h"
28 #include "llvm/ADT/SmallVector.h"
29 #include "llvm/ADT/Statistic.h"
30 #include "llvm/ADT/StringRef.h"
31 #include "llvm/Analysis/AliasAnalysis.h"
32 #include "llvm/Analysis/OptimizationRemarkEmitter.h"
33 #include "llvm/CodeGen/CalcSpillWeights.h"
34 #include "llvm/CodeGen/EdgeBundles.h"
35 #include "llvm/CodeGen/LiveInterval.h"
36 #include "llvm/CodeGen/LiveIntervalUnion.h"
37 #include "llvm/CodeGen/LiveIntervals.h"
38 #include "llvm/CodeGen/LiveRangeEdit.h"
39 #include "llvm/CodeGen/LiveRegMatrix.h"
40 #include "llvm/CodeGen/LiveStacks.h"
41 #include "llvm/CodeGen/MachineBasicBlock.h"
42 #include "llvm/CodeGen/MachineBlockFrequencyInfo.h"
43 #include "llvm/CodeGen/MachineDominators.h"
44 #include "llvm/CodeGen/MachineFrameInfo.h"
45 #include "llvm/CodeGen/MachineFunction.h"
46 #include "llvm/CodeGen/MachineFunctionPass.h"
47 #include "llvm/CodeGen/MachineInstr.h"
48 #include "llvm/CodeGen/MachineLoopInfo.h"
49 #include "llvm/CodeGen/MachineOperand.h"
50 #include "llvm/CodeGen/MachineOptimizationRemarkEmitter.h"
51 #include "llvm/CodeGen/MachineRegisterInfo.h"
52 #include "llvm/CodeGen/RegAllocRegistry.h"
53 #include "llvm/CodeGen/RegisterClassInfo.h"
54 #include "llvm/CodeGen/SlotIndexes.h"
55 #include "llvm/CodeGen/Spiller.h"
56 #include "llvm/CodeGen/TargetInstrInfo.h"
57 #include "llvm/CodeGen/TargetRegisterInfo.h"
58 #include "llvm/CodeGen/TargetSubtargetInfo.h"
59 #include "llvm/CodeGen/VirtRegMap.h"
60 #include "llvm/IR/Function.h"
61 #include "llvm/IR/LLVMContext.h"
62 #include "llvm/MC/MCRegisterInfo.h"
63 #include "llvm/Pass.h"
64 #include "llvm/Support/BlockFrequency.h"
65 #include "llvm/Support/BranchProbability.h"
66 #include "llvm/Support/CommandLine.h"
67 #include "llvm/Support/Debug.h"
68 #include "llvm/Support/MathExtras.h"
69 #include "llvm/Support/Timer.h"
70 #include "llvm/Support/raw_ostream.h"
71 #include "llvm/Target/TargetMachine.h"
72 #include "llvm/IR/DebugInfoMetadata.h"
73 #include <algorithm>
74 #include <cassert>
75 #include <cstdint>
76 #include <memory>
77 #include <queue>
78 #include <tuple>
79 #include <utility>

using namespace llvm;

#define DEBUG_TYPE "regalloc"

STATISTIC(NumGlobalSplits, "Number of split global live ranges");
STATISTIC(NumLocalSplits,  "Number of split local live ranges");
STATISTIC(NumEvicted,      "Number of interferences evicted");

static cl::opt<SplitEditor::ComplementSpillMode> SplitSpillMode(
    "split-spill-mode", cl::Hidden,
    cl::desc("Spill mode for splitting live ranges"),
    cl::values(clEnumValN(SplitEditor::SM_Partition, "default", "Default"),
               clEnumValN(SplitEditor::SM_Size, "size", "Optimize for size"),
               clEnumValN(SplitEditor::SM_Speed, "speed", "Optimize for speed")),
    cl::init(SplitEditor::SM_Speed));

static cl::opt<unsigned>
LastChanceRecoloringMaxDepth("lcr-max-depth", cl::Hidden,
                             cl::desc("Last chance recoloring max depth"),
                             cl::init(5));

static cl::opt<unsigned> LastChanceRecoloringMaxInterference(
    "lcr-max-interf", cl::Hidden,
    cl::desc("Last chance recoloring maximum number of considered"
             " interferences at a time"),
    cl::init(8));

static cl::opt<bool> ExhaustiveSearch(
    "exhaustive-register-search", cl::Hidden,
    cl::desc("Exhaustive Search for registers bypassing the depth "
             "and interference cutoffs of last chance recoloring"));

static cl::opt<bool> EnableLocalReassignment(
    "enable-local-reassign", cl::Hidden,
    cl::desc("Local reassignment can yield better allocation decisions, but "
             "may be compile time intensive"),
    cl::init(false));

static cl::opt<bool> EnableDeferredSpilling(
    "enable-deferred-spilling", cl::Hidden,
    cl::desc("Instead of spilling a variable right away, defer the actual "
             "code insertion to the end of the allocation. That way the "
             "allocator might still find a suitable coloring for this "
             "variable because of other evicted variables."),
    cl::init(false));

// FIXME: Find a good default for this flag and remove the flag.
static cl::opt<unsigned>
CSRFirstTimeCost("regalloc-csr-first-time-cost",
                 cl::desc("Cost for first time use of callee-saved register."),
                 cl::init(0), cl::Hidden);

static cl::opt<bool> ConsiderLocalIntervalCost(
    "consider-local-interval-cost", cl::Hidden,
    cl::desc("Consider the cost of local intervals created by a split "
             "candidate when choosing the best split candidate."),
    cl::init(false));

static RegisterRegAlloc greedyRegAlloc("greedy", "greedy register allocator",
                                       createGreedyRegisterAllocator);

namespace {

class RAGreedy : public MachineFunctionPass,
                 public RegAllocBase,
                 private LiveRangeEdit::Delegate {
  // Convenient shortcuts.
  using PQueue = std::priority_queue<std::pair<unsigned, unsigned>>;
  using SmallLISet = SmallPtrSet<LiveInterval *, 4>;
  using SmallVirtRegSet = SmallSet<Register, 16>;

  // Context.
  MachineFunction *MF;

  // Shortcuts to some useful interfaces.
  const TargetInstrInfo *TII;
  const TargetRegisterInfo *TRI;
  RegisterClassInfo RCI;

  // Analyses.
  SlotIndexes *Indexes;
  MachineBlockFrequencyInfo *MBFI;
  MachineDominatorTree *DomTree;
  MachineLoopInfo *Loops;
  MachineOptimizationRemarkEmitter *ORE;
  EdgeBundles *Bundles;
  SpillPlacement *SpillPlacer;
  LiveDebugVariables *DebugVars;
  AliasAnalysis *AA;

  // State.
  std::unique_ptr<Spiller> SpillerInstance;
  PQueue Queue;
  unsigned NextCascade;
  std::unique_ptr<VirtRegAuxInfo> VRAI;

  // Live ranges pass through a number of stages as we try to allocate them.
  // Some of the stages may also create new live ranges:
  //
  // - Region splitting.
  // - Per-block splitting.
  // - Local splitting.
  // - Spilling.
  //
  // Ranges produced by one of the stages skip the previous stages when they
  // are dequeued. This improves performance because we can skip interference
  // checks that are unlikely to give any results. It also guarantees that the
  // live range splitting algorithm terminates, something that is otherwise
  // hard to ensure.
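  //
  // A range typically progresses RS_New -> RS_Assign -> RS_Split ->
  // RS_Split2 -> RS_Spill -> RS_Done; moving forward through the stages is
  // what lets requeued ranges skip work that has already failed for them.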
  enum LiveRangeStage {
    /// Newly created live range that has never been queued.
    RS_New,

    /// Only attempt assignment and eviction. Then requeue as RS_Split.
    RS_Assign,

    /// Attempt live range splitting if assignment is impossible.
    RS_Split,

    /// Attempt more aggressive live range splitting that is guaranteed to
    /// make progress. This is used for split products that may not be making
    /// progress.
    RS_Split2,

    /// Live range will be spilled. No more splitting will be attempted.
    RS_Spill,

    /// Live range is in memory. Because of other evictions, it might get
    /// moved into a register in the end.
    RS_Memory,

    /// There is nothing more we can do to this live range. Abort compilation
    /// if it can't be assigned.
    RS_Done
  };

  // Enum CutOffStage to keep track of whether the register allocation failed
  // because of the cutoffs encountered in last chance recoloring.
  // Note: This is used as a bitmask. New values should be the next power of 2.
  enum CutOffStage {
    // No cutoffs encountered.
    CO_None = 0,

    // lcr-max-depth cutoff encountered.
    CO_Depth = 1,

    // lcr-max-interf cutoff encountered.
    CO_Interf = 2
  };

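  // CutOffInfo accumulates these stages as flag bits; e.g. a run that hits
  // both cutoffs ends with CutOffInfo == (CO_Depth | CO_Interf).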
  uint8_t CutOffInfo;

#ifndef NDEBUG
  static const char *const StageName[];
#endif

  // RegInfo - Keep additional information about each live range.
  struct RegInfo {
    LiveRangeStage Stage = RS_New;

    // Cascade - Eviction loop prevention. See canEvictInterference().
    unsigned Cascade = 0;

    RegInfo() = default;
  };

  IndexedMap<RegInfo, VirtReg2IndexFunctor> ExtraRegInfo;

  LiveRangeStage getStage(const LiveInterval &VirtReg) const {
    return ExtraRegInfo[VirtReg.reg()].Stage;
  }

  void setStage(const LiveInterval &VirtReg, LiveRangeStage Stage) {
    ExtraRegInfo.resize(MRI->getNumVirtRegs());
    ExtraRegInfo[VirtReg.reg()].Stage = Stage;
  }

  template<typename Iterator>
  void setStage(Iterator Begin, Iterator End, LiveRangeStage NewStage) {
    ExtraRegInfo.resize(MRI->getNumVirtRegs());
    for (; Begin != End; ++Begin) {
      Register Reg = *Begin;
      if (ExtraRegInfo[Reg].Stage == RS_New)
        ExtraRegInfo[Reg].Stage = NewStage;
    }
  }

  /// Cost of evicting interference.
  struct EvictionCost {
    unsigned BrokenHints = 0; ///< Total number of broken hints.
    float MaxWeight = 0;      ///< Maximum spill weight evicted.

    EvictionCost() = default;

    bool isMax() const { return BrokenHints == ~0u; }

    void setMax() { BrokenHints = ~0u; }

    void setBrokenHints(unsigned NHints) { BrokenHints = NHints; }

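    // The comparison is lexicographic: broken hints dominate, and spill
    // weight only breaks ties. E.g. {BrokenHints=0, MaxWeight=5.0} compares
    // cheaper than {BrokenHints=1, MaxWeight=0.1}.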
    bool operator<(const EvictionCost &O) const {
      return std::tie(BrokenHints, MaxWeight) <
             std::tie(O.BrokenHints, O.MaxWeight);
    }
  };

  /// EvictionTrack - Keeps track of past evictions in order to optimize region
  /// split decision.
  class EvictionTrack {

  public:
    using EvictorInfo =
        std::pair<Register /* evictor */, MCRegister /* physreg */>;
    using EvicteeInfo = llvm::DenseMap<Register /* evictee */, EvictorInfo>;

  private:
    /// Each Vreg that has been evicted in the last stage of selectOrSplit will
    /// be mapped to the evictor Vreg and the PhysReg it was evicted from.
    EvicteeInfo Evictees;

  public:
    /// Clear all eviction information.
    void clear() { Evictees.clear(); }

    /// Clear eviction information for the given evictee Vreg.
    /// E.g. when Vreg gets a new allocation, the old eviction info is no
    /// longer relevant.
    /// \param Evictee The evictee Vreg for whom we want to clear collected
    /// eviction info.
    void clearEvicteeInfo(Register Evictee) { Evictees.erase(Evictee); }

    /// Track new eviction.
    /// The Evictor vreg has evicted the Evictee vreg from Physreg.
    /// \param PhysReg The physical register Evictee was evicted from.
    /// \param Evictor The evictor Vreg that evicted Evictee.
    /// \param Evictee The evictee Vreg.
    void addEviction(MCRegister PhysReg, Register Evictor, Register Evictee) {
      Evictees[Evictee].first = Evictor;
      Evictees[Evictee].second = PhysReg;
    }

    /// Return the Evictor Vreg which evicted Evictee Vreg from PhysReg.
    /// \param Evictee The evictee vreg.
    /// \return The Evictor vreg which evicted Evictee vreg from PhysReg. 0 if
    /// nobody has evicted Evictee from PhysReg.
    EvictorInfo getEvictor(Register Evictee) {
      if (Evictees.count(Evictee)) {
        return Evictees[Evictee];
      }

      return EvictorInfo(0, 0);
    }
  };

  // Keeps track of past evictions in order to optimize region split decision.
  EvictionTrack LastEvicted;

  // Splitting state.
  std::unique_ptr<SplitAnalysis> SA;
  std::unique_ptr<SplitEditor> SE;

  /// Cached per-block interference maps.
  InterferenceCache IntfCache;

  /// All basic blocks where the current register has uses.
  SmallVector<SpillPlacement::BlockConstraint, 8> SplitConstraints;

  /// Global live range splitting candidate info.
  struct GlobalSplitCandidate {
    // Register intended for assignment, or 0.
    MCRegister PhysReg;

    // SplitKit interval index for this candidate.
    unsigned IntvIdx;

    // Interference for PhysReg.
    InterferenceCache::Cursor Intf;

    // Bundles where this candidate should be live.
    BitVector LiveBundles;
    SmallVector<unsigned, 8> ActiveBlocks;

    void reset(InterferenceCache &Cache, MCRegister Reg) {
      PhysReg = Reg;
      IntvIdx = 0;
      Intf.setPhysReg(Cache, Reg);
      LiveBundles.clear();
      ActiveBlocks.clear();
    }

    // Set B[I] = C for every live bundle where B[I] was NoCand.
    unsigned getBundles(SmallVectorImpl<unsigned> &B, unsigned C) {
      unsigned Count = 0;
      for (unsigned I : LiveBundles.set_bits())
        if (B[I] == NoCand) {
          B[I] = C;
          Count++;
        }
      return Count;
    }
  };

  /// Candidate info for each PhysReg in AllocationOrder.
  /// This vector never shrinks, but grows to the size of the largest register
  /// class.
  SmallVector<GlobalSplitCandidate, 32> GlobalCand;

  enum : unsigned { NoCand = ~0u };

  /// Candidate map. Each edge bundle is assigned to a GlobalCand entry, or to
  /// NoCand which indicates the stack interval.
  SmallVector<unsigned, 32> BundleCand;

  /// Callee-save register cost, calculated once per machine function.
  BlockFrequency CSRCost;

  /// Whether to run the local reassignment heuristic. This information is
  /// obtained from the TargetSubtargetInfo.
  bool EnableLocalReassign;

  /// Whether to consider the cost of local intervals created by a split
  /// candidate when choosing the best split candidate.
  bool EnableAdvancedRASplitCost;

  /// Set of broken hints that may be reconciled later because of eviction.
  SmallSetVector<LiveInterval *, 8> SetOfBrokenHints;

  /// The register cost values. This list will be recreated for each machine
  /// function.
  ArrayRef<uint8_t> RegCosts;

public:
  RAGreedy(const RegClassFilterFunc F = allocateAllRegClasses);

  /// Return the pass name.
  StringRef getPassName() const override { return "Greedy Register Allocator"; }

  /// RAGreedy analysis usage.
  void getAnalysisUsage(AnalysisUsage &AU) const override;
  void releaseMemory() override;
  Spiller &spiller() override { return *SpillerInstance; }
  void enqueueImpl(LiveInterval *LI) override;
  LiveInterval *dequeue() override;
  MCRegister selectOrSplit(LiveInterval &,
                           SmallVectorImpl<Register> &) override;
  void aboutToRemoveInterval(LiveInterval &) override;

  /// Perform register allocation.
  bool runOnMachineFunction(MachineFunction &mf) override;

  MachineFunctionProperties getRequiredProperties() const override {
    return MachineFunctionProperties().set(
        MachineFunctionProperties::Property::NoPHIs);
  }

  MachineFunctionProperties getClearedProperties() const override {
    return MachineFunctionProperties().set(
        MachineFunctionProperties::Property::IsSSA);
  }

  static char ID;

private:
  MCRegister selectOrSplitImpl(LiveInterval &, SmallVectorImpl<Register> &,
                               SmallVirtRegSet &, unsigned = 0);

  bool LRE_CanEraseVirtReg(Register) override;
  void LRE_WillShrinkVirtReg(Register) override;
  void LRE_DidCloneVirtReg(Register, Register) override;
  void enqueue(PQueue &CurQueue, LiveInterval *LI);
  LiveInterval *dequeue(PQueue &CurQueue);

  BlockFrequency calcSpillCost();
  bool addSplitConstraints(InterferenceCache::Cursor, BlockFrequency&);
  bool addThroughConstraints(InterferenceCache::Cursor, ArrayRef<unsigned>);
  bool growRegion(GlobalSplitCandidate &Cand);
  bool splitCanCauseEvictionChain(Register Evictee, GlobalSplitCandidate &Cand,
                                  unsigned BBNumber,
                                  const AllocationOrder &Order);
  bool splitCanCauseLocalSpill(unsigned VirtRegToSplit,
                               GlobalSplitCandidate &Cand, unsigned BBNumber,
                               const AllocationOrder &Order);
  BlockFrequency calcGlobalSplitCost(GlobalSplitCandidate &,
                                     const AllocationOrder &Order,
                                     bool *CanCauseEvictionChain);
  bool calcCompactRegion(GlobalSplitCandidate&);
  void splitAroundRegion(LiveRangeEdit&, ArrayRef<unsigned>);
  void calcGapWeights(MCRegister, SmallVectorImpl<float> &);
  Register canReassign(LiveInterval &VirtReg, Register PrevReg) const;
  bool shouldEvict(LiveInterval &A, bool, LiveInterval &B, bool) const;
  bool canEvictInterference(LiveInterval &, MCRegister, bool, EvictionCost &,
                            const SmallVirtRegSet &) const;
  bool canEvictInterferenceInRange(const LiveInterval &VirtReg,
                                   MCRegister PhysReg, SlotIndex Start,
                                   SlotIndex End, EvictionCost &MaxCost) const;
  MCRegister getCheapestEvicteeWeight(const AllocationOrder &Order,
                                      const LiveInterval &VirtReg,
                                      SlotIndex Start, SlotIndex End,
                                      float *BestEvictWeight) const;
  void evictInterference(LiveInterval &, MCRegister,
                         SmallVectorImpl<Register> &);
  bool mayRecolorAllInterferences(MCRegister PhysReg, LiveInterval &VirtReg,
                                  SmallLISet &RecoloringCandidates,
                                  const SmallVirtRegSet &FixedRegisters);

  MCRegister tryAssign(LiveInterval&, AllocationOrder&,
                       SmallVectorImpl<Register>&,
                       const SmallVirtRegSet&);
  MCRegister tryEvict(LiveInterval &, AllocationOrder &,
                      SmallVectorImpl<Register> &, uint8_t,
                      const SmallVirtRegSet &);
  MCRegister tryRegionSplit(LiveInterval &, AllocationOrder &,
                            SmallVectorImpl<Register> &);
  /// Calculate cost of region splitting.
  unsigned calculateRegionSplitCost(LiveInterval &VirtReg,
                                    AllocationOrder &Order,
                                    BlockFrequency &BestCost,
                                    unsigned &NumCands, bool IgnoreCSR,
                                    bool *CanCauseEvictionChain = nullptr);
  /// Perform region splitting.
  unsigned doRegionSplit(LiveInterval &VirtReg, unsigned BestCand,
                         bool HasCompact,
                         SmallVectorImpl<Register> &NewVRegs);
  /// Check other options before using a callee-saved register for the first
  /// time.
  MCRegister tryAssignCSRFirstTime(LiveInterval &VirtReg,
                                   AllocationOrder &Order, MCRegister PhysReg,
                                   uint8_t &CostPerUseLimit,
                                   SmallVectorImpl<Register> &NewVRegs);
  void initializeCSRCost();
  unsigned tryBlockSplit(LiveInterval&, AllocationOrder&,
                         SmallVectorImpl<Register>&);
  unsigned tryInstructionSplit(LiveInterval&, AllocationOrder&,
                               SmallVectorImpl<Register>&);
  unsigned tryLocalSplit(LiveInterval&, AllocationOrder&,
                         SmallVectorImpl<Register>&);
  unsigned trySplit(LiveInterval&, AllocationOrder&,
                    SmallVectorImpl<Register>&,
                    const SmallVirtRegSet&);
  unsigned tryLastChanceRecoloring(LiveInterval &, AllocationOrder &,
                                   SmallVectorImpl<Register> &,
                                   SmallVirtRegSet &, unsigned);
  bool tryRecoloringCandidates(PQueue &, SmallVectorImpl<Register> &,
                               SmallVirtRegSet &, unsigned);
  void tryHintRecoloring(LiveInterval &);
  void tryHintsRecoloring();

  /// Model the information carried by one end of a copy.
  struct HintInfo {
    /// The frequency of the copy.
    BlockFrequency Freq;
    /// The virtual register or physical register.
    Register Reg;
    /// Its currently assigned register.
    /// In case of a physical register Reg == PhysReg.
    MCRegister PhysReg;

    HintInfo(BlockFrequency Freq, Register Reg, MCRegister PhysReg)
        : Freq(Freq), Reg(Reg), PhysReg(PhysReg) {}
  };
  using HintsInfo = SmallVector<HintInfo, 4>;

  BlockFrequency getBrokenHintFreq(const HintsInfo &, MCRegister);
  void collectHintInfo(Register, HintsInfo &);

  bool isUnusedCalleeSavedReg(MCRegister PhysReg) const;

  /// Greedy RA statistics, reported through optimization remarks.
  struct RAGreedyStats {
    unsigned Reloads = 0;
    unsigned FoldedReloads = 0;
    unsigned ZeroCostFoldedReloads = 0;
    unsigned Spills = 0;
    unsigned FoldedSpills = 0;
    unsigned Copies = 0;
    float ReloadsCost = 0.0f;
    float FoldedReloadsCost = 0.0f;
    float SpillsCost = 0.0f;
    float FoldedSpillsCost = 0.0f;
    float CopiesCost = 0.0f;

    bool isEmpty() {
      return !(Reloads || FoldedReloads || Spills || FoldedSpills ||
               ZeroCostFoldedReloads || Copies);
    }

    void add(RAGreedyStats other) {
      Reloads += other.Reloads;
      FoldedReloads += other.FoldedReloads;
      ZeroCostFoldedReloads += other.ZeroCostFoldedReloads;
      Spills += other.Spills;
      FoldedSpills += other.FoldedSpills;
      Copies += other.Copies;
      ReloadsCost += other.ReloadsCost;
      FoldedReloadsCost += other.FoldedReloadsCost;
      SpillsCost += other.SpillsCost;
      FoldedSpillsCost += other.FoldedSpillsCost;
      CopiesCost += other.CopiesCost;
    }

    void report(MachineOptimizationRemarkMissed &R);
  };

  /// Compute statistics for a basic block.
  RAGreedyStats computeStats(MachineBasicBlock &MBB);

  /// Compute and report statistics through a remark.
  RAGreedyStats reportStats(MachineLoop *L);

  /// Report the statistics for each loop.
  void reportStats();
};

} // end anonymous namespace

char RAGreedy::ID = 0;
char &llvm::RAGreedyID = RAGreedy::ID;

INITIALIZE_PASS_BEGIN(RAGreedy, "greedy",
                "Greedy Register Allocator", false, false)
INITIALIZE_PASS_DEPENDENCY(LiveDebugVariables)
INITIALIZE_PASS_DEPENDENCY(SlotIndexes)
INITIALIZE_PASS_DEPENDENCY(LiveIntervals)
INITIALIZE_PASS_DEPENDENCY(RegisterCoalescer)
INITIALIZE_PASS_DEPENDENCY(MachineScheduler)
INITIALIZE_PASS_DEPENDENCY(LiveStacks)
INITIALIZE_PASS_DEPENDENCY(MachineDominatorTree)
INITIALIZE_PASS_DEPENDENCY(MachineLoopInfo)
INITIALIZE_PASS_DEPENDENCY(VirtRegMap)
INITIALIZE_PASS_DEPENDENCY(LiveRegMatrix)
INITIALIZE_PASS_DEPENDENCY(EdgeBundles)
INITIALIZE_PASS_DEPENDENCY(SpillPlacement)
INITIALIZE_PASS_DEPENDENCY(MachineOptimizationRemarkEmitterPass)
INITIALIZE_PASS_END(RAGreedy, "greedy",
                "Greedy Register Allocator", false, false)

#ifndef NDEBUG
const char *const RAGreedy::StageName[] = {
    "RS_New",
    "RS_Assign",
    "RS_Split",
    "RS_Split2",
    "RS_Spill",
    "RS_Memory",
    "RS_Done"
};
#endif

// Hysteresis to use when comparing floats.
// This helps stabilize decisions based on float comparisons.
const float Hysteresis = (2007 / 2048.0f); // 0.97998046875
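// (Illustrative: comparisons of the form NewCost >= OldCost * Hysteresis
// treat near-ties as equal, so a candidate must win by roughly 2% before it
// displaces the incumbent and flips a decision.)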

FunctionPass* llvm::createGreedyRegisterAllocator() {
  return new RAGreedy();
}

namespace llvm {
FunctionPass* createGreedyRegisterAllocator(
  std::function<bool(const TargetRegisterInfo &TRI,
                     const TargetRegisterClass &RC)> Ftor);

}

FunctionPass* llvm::createGreedyRegisterAllocator(
  std::function<bool(const TargetRegisterInfo &TRI,
                     const TargetRegisterClass &RC)> Ftor) {
  return new RAGreedy(Ftor);
}

RAGreedy::RAGreedy(RegClassFilterFunc F):
  MachineFunctionPass(ID),
  RegAllocBase(F) {
}

void RAGreedy::getAnalysisUsage(AnalysisUsage &AU) const {
  AU.setPreservesCFG();
  AU.addRequired<MachineBlockFrequencyInfo>();
  AU.addPreserved<MachineBlockFrequencyInfo>();
  AU.addRequired<AAResultsWrapperPass>();
  AU.addPreserved<AAResultsWrapperPass>();
  AU.addRequired<LiveIntervals>();
  AU.addPreserved<LiveIntervals>();
  AU.addRequired<SlotIndexes>();
  AU.addPreserved<SlotIndexes>();
  AU.addRequired<LiveDebugVariables>();
  AU.addPreserved<LiveDebugVariables>();
  AU.addRequired<LiveStacks>();
  AU.addPreserved<LiveStacks>();
  AU.addRequired<MachineDominatorTree>();
  AU.addPreserved<MachineDominatorTree>();
  AU.addRequired<MachineLoopInfo>();
  AU.addPreserved<MachineLoopInfo>();
  AU.addRequired<VirtRegMap>();
  AU.addPreserved<VirtRegMap>();
  AU.addRequired<LiveRegMatrix>();
  AU.addPreserved<LiveRegMatrix>();
  AU.addRequired<EdgeBundles>();
  AU.addRequired<SpillPlacement>();
  AU.addRequired<MachineOptimizationRemarkEmitterPass>();
  MachineFunctionPass::getAnalysisUsage(AU);
}

//===----------------------------------------------------------------------===//
//                     LiveRangeEdit delegate methods
//===----------------------------------------------------------------------===//

bool RAGreedy::LRE_CanEraseVirtReg(Register VirtReg) {
  LiveInterval &LI = LIS->getInterval(VirtReg);
  if (VRM->hasPhys(VirtReg)) {
    Matrix->unassign(LI);
    aboutToRemoveInterval(LI);
    return true;
  }
  // Unassigned virtreg is probably in the priority queue.
  // RegAllocBase will erase it after dequeueing.
  // Nonetheless, clear the live-range so that the debug
  // dump will show the right state for that VirtReg.
  LI.clear();
  return false;
}

void RAGreedy::LRE_WillShrinkVirtReg(Register VirtReg) {
  if (!VRM->hasPhys(VirtReg))
    return;

  // Register is assigned, put it back on the queue for reassignment.
  LiveInterval &LI = LIS->getInterval(VirtReg);
  Matrix->unassign(LI);
  RegAllocBase::enqueue(&LI);
}

void RAGreedy::LRE_DidCloneVirtReg(Register New, Register Old) {
  // Cloning a register we haven't even heard about yet? Just ignore it.
  if (!ExtraRegInfo.inBounds(Old))
    return;

  // LRE may clone a virtual register because dead code elimination causes it
  // to be split into connected components. The new components are much smaller
  // than the original, so they should get a new chance at being assigned:
  // reset the parent to RS_Assign, then give the clone the same stage as the
  // parent.
  ExtraRegInfo[Old].Stage = RS_Assign;
  ExtraRegInfo.grow(New);
  ExtraRegInfo[New] = ExtraRegInfo[Old];
}

void RAGreedy::releaseMemory() {
  SpillerInstance.reset();
  ExtraRegInfo.clear();
  GlobalCand.clear();
}

void RAGreedy::enqueueImpl(LiveInterval *LI) { enqueue(Queue, LI); }

void RAGreedy::enqueue(PQueue &CurQueue, LiveInterval *LI) {
  // Prioritize live ranges by size, assigning larger ranges first.
  // The queue holds (size, reg) pairs.
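  // (Illustrative summary of the Prio encoding built below: bit 31 marks
  // ranges that are not RS_Split/RS_Memory, bit 30 marks ranges with a known
  // register preference, bit 29 marks global ranges, bits 24-28 carry the
  // register class's AllocationPriority, and the low bits hold the size or
  // instruction distance.)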
  const unsigned Size = LI->getSize();
  const Register Reg = LI->reg();
  assert(Reg.isVirtual() && "Can only enqueue virtual registers");
  unsigned Prio;

  ExtraRegInfo.grow(Reg);
  if (ExtraRegInfo[Reg].Stage == RS_New)
    ExtraRegInfo[Reg].Stage = RS_Assign;

  if (ExtraRegInfo[Reg].Stage == RS_Split) {
    // Unsplit ranges that couldn't be allocated immediately are deferred until
    // everything else has been allocated.
    Prio = Size;
  } else if (ExtraRegInfo[Reg].Stage == RS_Memory) {
    // Memory operands should be considered last.
    // Change the priority such that memory operands are assigned in
    // the reverse order in which they came in.
    // TODO: Make this a member variable and probably do something about hints.
    static unsigned MemOp = 0;
    Prio = MemOp++;
  } else {
    // Giant live ranges fall back to the global assignment heuristic, which
    // prevents excessive spilling in pathological cases.
    bool ReverseLocal = TRI->reverseLocalAssignment();
    bool AddPriorityToGlobal = TRI->addAllocPriorityToGlobalRanges();
    const TargetRegisterClass &RC = *MRI->getRegClass(Reg);
    bool ForceGlobal = !ReverseLocal &&
      (Size / SlotIndex::InstrDist) > (2 * RC.getNumRegs());

    if (ExtraRegInfo[Reg].Stage == RS_Assign && !ForceGlobal && !LI->empty() &&
        LIS->intervalIsInOneMBB(*LI)) {
      // Allocate original local ranges in linear instruction order. Since they
      // are singly defined, this produces optimal coloring in the absence of
      // global interference and other constraints.
      if (!ReverseLocal)
        Prio = LI->beginIndex().getInstrDistance(Indexes->getLastIndex());
      else {
        // Allocating bottom up may allow many short LRGs to be assigned first
        // to one of the cheap registers. This could be much faster for very
        // large blocks on targets with many physical registers.
        Prio = Indexes->getZeroIndex().getInstrDistance(LI->endIndex());
      }
      Prio |= RC.AllocationPriority << 24;
    } else {
      // Allocate global and split ranges in long->short order. Long ranges
      // that don't fit should be spilled (or split) ASAP so they don't create
      // interference. Mark a bit to prioritize global above local ranges.
      Prio = (1u << 29) + Size;

      if (AddPriorityToGlobal)
        Prio |= RC.AllocationPriority << 24;
    }
    // Mark a higher bit to prioritize global and local above RS_Split.
    Prio |= (1u << 31);

    // Boost ranges that have a physical register hint.
    if (VRM->hasKnownPreference(Reg))
      Prio |= (1u << 30);
  }
  // The virtual register number is a tie breaker for same-sized ranges.
  // Give lower vreg numbers higher priority to assign them first.
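  // (Pushing ~Reg rather than Reg achieves this with a max-heap: among equal
  // priorities, the smaller vreg number yields the larger complement and is
  // popped first; dequeue() undoes the complement.)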
  CurQueue.push(std::make_pair(Prio, ~Reg));
}

LiveInterval *RAGreedy::dequeue() { return dequeue(Queue); }

LiveInterval *RAGreedy::dequeue(PQueue &CurQueue) {
  if (CurQueue.empty())
    return nullptr;
  LiveInterval *LI = &LIS->getInterval(~CurQueue.top().second);
  CurQueue.pop();
  return LI;
}

//===----------------------------------------------------------------------===//
//                            Direct Assignment
//===----------------------------------------------------------------------===//

/// tryAssign - Try to assign VirtReg to an available register.
MCRegister RAGreedy::tryAssign(LiveInterval &VirtReg,
                               AllocationOrder &Order,
                               SmallVectorImpl<Register> &NewVRegs,
                               const SmallVirtRegSet &FixedRegisters) {
  MCRegister PhysReg;
  for (auto I = Order.begin(), E = Order.end(); I != E && !PhysReg; ++I) {
    assert(*I);
    if (!Matrix->checkInterference(VirtReg, *I)) {
      if (I.isHint())
        return *I;
      else
        PhysReg = *I;
    }
  }
  if (!PhysReg.isValid())
    return PhysReg;

  // PhysReg is available, but there may be a better choice.

  // If we missed a simple hint, try to cheaply evict interference from the
  // preferred register.
  if (Register Hint = MRI->getSimpleHint(VirtReg.reg()))
    if (Order.isHint(Hint)) {
      MCRegister PhysHint = Hint.asMCReg();
      LLVM_DEBUG(dbgs() << "missed hint " << printReg(PhysHint, TRI) << '\n');
      EvictionCost MaxCost;
      MaxCost.setBrokenHints(1);
      if (canEvictInterference(VirtReg, PhysHint, true, MaxCost,
                               FixedRegisters)) {
        evictInterference(VirtReg, PhysHint, NewVRegs);
        return PhysHint;
      }
      // Record the missed hint, we may be able to recover
      // at the end if the surrounding allocation changed.
      SetOfBrokenHints.insert(&VirtReg);
    }

  // Try to evict interference from a cheaper alternative.
  uint8_t Cost = RegCosts[PhysReg];

  // Most registers have 0 additional cost.
  if (!Cost)
    return PhysReg;

  LLVM_DEBUG(dbgs() << printReg(PhysReg, TRI) << " is available at cost "
                    << Cost << '\n');
  MCRegister CheapReg = tryEvict(VirtReg, Order, NewVRegs, Cost, FixedRegisters);
  return CheapReg ? CheapReg : PhysReg;
}

//===----------------------------------------------------------------------===//
//                         Interference eviction
//===----------------------------------------------------------------------===//

Register RAGreedy::canReassign(LiveInterval &VirtReg, Register PrevReg) const {
  auto Order =
      AllocationOrder::create(VirtReg.reg(), *VRM, RegClassInfo, Matrix);
  MCRegister PhysReg;
  for (auto I = Order.begin(), E = Order.end(); I != E && !PhysReg; ++I) {
    if ((*I).id() == PrevReg.id())
      continue;

    MCRegUnitIterator Units(*I, TRI);
    for (; Units.isValid(); ++Units) {
      // Instantiate a "subquery", not to be confused with the Queries array.
      LiveIntervalUnion::Query subQ(VirtReg, Matrix->getLiveUnions()[*Units]);
      if (subQ.checkInterference())
        break;
    }
    // If no units have interference, break out with the current PhysReg.
    if (!Units.isValid())
      PhysReg = *I;
  }
  if (PhysReg)
    LLVM_DEBUG(dbgs() << "can reassign: " << VirtReg << " from "
                      << printReg(PrevReg, TRI) << " to "
                      << printReg(PhysReg, TRI) << '\n');
  return PhysReg;
}

/// shouldEvict - determine if A should evict the assigned live range B. The
/// eviction policy defined by this function together with the allocation order
/// defined by enqueue() decides which registers ultimately end up being split
/// and spilled.
///
/// Cascade numbers are used to prevent infinite loops if this function is a
/// cyclic relation.
///
/// @param A          The live range to be assigned.
/// @param IsHint     True when A is about to be assigned to its preferred
///                   register.
/// @param B          The live range to be evicted.
/// @param BreaksHint True when B is already assigned to its preferred register.
bool RAGreedy::shouldEvict(LiveInterval &A, bool IsHint,
                           LiveInterval &B, bool BreaksHint) const {
  bool CanSplit = getStage(B) < RS_Spill;

  // Be fairly aggressive about following hints as long as the evictee can be
  // split.
  if (CanSplit && IsHint && !BreaksHint)
    return true;

  if (A.weight() > B.weight()) {
    LLVM_DEBUG(dbgs() << "should evict: " << B << " w= " << B.weight() << '\n');
    return true;
  }
  return false;
}

/// canEvictInterference - Return true if all interferences between VirtReg and
/// PhysReg can be evicted.
///
/// @param VirtReg Live range that is about to be assigned.
/// @param PhysReg Desired register for assignment.
/// @param IsHint  True when PhysReg is VirtReg's preferred register.
/// @param MaxCost Only look for cheaper candidates and update with new cost
///                when returning true.
/// @returns True when interference can be evicted cheaper than MaxCost.
bool RAGreedy::canEvictInterference(
    LiveInterval &VirtReg, MCRegister PhysReg, bool IsHint,
    EvictionCost &MaxCost, const SmallVirtRegSet &FixedRegisters) const {
  // It is only possible to evict virtual register interference.
  if (Matrix->checkInterference(VirtReg, PhysReg) > LiveRegMatrix::IK_VirtReg)
    return false;

  bool IsLocal = LIS->intervalIsInOneMBB(VirtReg);

  // Find VirtReg's cascade number. This will be unassigned if VirtReg was
  // never involved in an eviction before. If a cascade number was assigned,
  // deny evicting anything with the same or a newer cascade number. This
  // prevents infinite eviction loops.
  //
  // This works out so a register without a cascade number is allowed to evict
  // anything, and it can be evicted by anything.
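  //
  // E.g. suppose %a carries cascade 3 and evicts %b: %b is then stamped with
  // cascade 3 as well, so %b (or anything it later evicts) can no longer
  // evict %a, because the Cascade <= IntfCascade check below rejects it.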
  unsigned Cascade = ExtraRegInfo[VirtReg.reg()].Cascade;
  if (!Cascade)
    Cascade = NextCascade;

  EvictionCost Cost;
  for (MCRegUnitIterator Units(PhysReg, TRI); Units.isValid(); ++Units) {
    LiveIntervalUnion::Query &Q = Matrix->query(VirtReg, *Units);
    // If there are 10 or more interferences, chances are one is heavier.
    if (Q.collectInterferingVRegs(10) >= 10)
      return false;

    // Check if any interfering live range is heavier than MaxWeight.
    for (LiveInterval *Intf : reverse(Q.interferingVRegs())) {
      assert(Register::isVirtualRegister(Intf->reg()) &&
             "Only expecting virtual register interference from query");

      // Do not allow eviction of a virtual register if we are in the middle
      // of last-chance recoloring and this virtual register is one that we
      // have scavenged a physical register for.
      if (FixedRegisters.count(Intf->reg()))
        return false;

      // Never evict spill products. They cannot split or spill.
      if (getStage(*Intf) == RS_Done)
        return false;
      // Once a live range becomes small enough, it is urgent that we find a
      // register for it. This is indicated by an infinite spill weight. These
      // urgent live ranges get to evict almost anything.
      //
      // Also allow urgent evictions of unspillable ranges from a strictly
      // larger allocation order.
      bool Urgent =
          !VirtReg.isSpillable() &&
          (Intf->isSpillable() ||
           RegClassInfo.getNumAllocatableRegs(MRI->getRegClass(VirtReg.reg())) <
               RegClassInfo.getNumAllocatableRegs(
                   MRI->getRegClass(Intf->reg())));
      // Only evict older cascades or live ranges without a cascade.
      unsigned IntfCascade = ExtraRegInfo[Intf->reg()].Cascade;
      if (Cascade <= IntfCascade) {
        if (!Urgent)
          return false;
        // We permit breaking cascades for urgent evictions. It should be the
        // last resort, though, so make it really expensive.
        Cost.BrokenHints += 10;
      }
      // Would this break a satisfied hint?
      bool BreaksHint = VRM->hasPreferredPhys(Intf->reg());
      // Update eviction cost.
      Cost.BrokenHints += BreaksHint;
      Cost.MaxWeight = std::max(Cost.MaxWeight, Intf->weight());
      // Abort if this would be too expensive.
      if (!(Cost < MaxCost))
        return false;
      if (Urgent)
        continue;
      // Apply the eviction policy for non-urgent evictions.
      if (!shouldEvict(VirtReg, IsHint, *Intf, BreaksHint))
        return false;
      // If !MaxCost.isMax(), then we're just looking for a cheap register.
      // Evicting another local live range in this case could lead to
      // suboptimal coloring.
      if (!MaxCost.isMax() && IsLocal && LIS->intervalIsInOneMBB(*Intf) &&
          (!EnableLocalReassign || !canReassign(*Intf, PhysReg))) {
        return false;
      }
    }
  }
  MaxCost = Cost;
  return true;
}

/// Return true if all interferences between VirtReg and PhysReg between
/// Start and End can be evicted.
///
/// \param VirtReg Live range that is about to be assigned.
/// \param PhysReg Desired register for assignment.
/// \param Start   Start of range to look for interferences.
/// \param End     End of range to look for interferences.
/// \param MaxCost Only look for cheaper candidates and update with new cost
///                when returning true.
/// \return True when interference can be evicted cheaper than MaxCost.
bool RAGreedy::canEvictInterferenceInRange(const LiveInterval &VirtReg,
                                           MCRegister PhysReg, SlotIndex Start,
                                           SlotIndex End,
                                           EvictionCost &MaxCost) const {
  EvictionCost Cost;

  for (MCRegUnitIterator Units(PhysReg, TRI); Units.isValid(); ++Units) {
    LiveIntervalUnion::Query &Q = Matrix->query(VirtReg, *Units);
    Q.collectInterferingVRegs();

    // Check if any interfering live range is heavier than MaxWeight.
    for (const LiveInterval *Intf : reverse(Q.interferingVRegs())) {
      // Check if interference overlaps the segment of interest.
      if (!Intf->overlaps(Start, End))
        continue;

      // Cannot evict non-virtual-register interference.
      if (!Register::isVirtualRegister(Intf->reg()))
        return false;
      // Never evict spill products. They cannot split or spill.
      if (getStage(*Intf) == RS_Done)
        return false;

      // Would this break a satisfied hint?
      bool BreaksHint = VRM->hasPreferredPhys(Intf->reg());
      // Update eviction cost.
      Cost.BrokenHints += BreaksHint;
      Cost.MaxWeight = std::max(Cost.MaxWeight, Intf->weight());
      // Abort if this would be too expensive.
      if (!(Cost < MaxCost))
        return false;
    }
  }

  if (Cost.MaxWeight == 0)
    return false;

  MaxCost = Cost;
  return true;
}

/// Return the physical register that is the best candidate for eviction by a
/// local split interval that will be created between Start and End.
///
/// \param Order           The allocation order.
/// \param VirtReg         Live range that is about to be assigned.
/// \param Start           Start of range to look for interferences.
/// \param End             End of range to look for interferences.
/// \param BestEvictWeight The eviction cost of that eviction.
/// \return The PhysReg which is the best candidate for eviction and the
/// eviction cost in BestEvictWeight.
MCRegister RAGreedy::getCheapestEvicteeWeight(const AllocationOrder &Order,
                                              const LiveInterval &VirtReg,
                                              SlotIndex Start, SlotIndex End,
                                              float *BestEvictWeight) const {
  EvictionCost BestEvictCost;
  BestEvictCost.setMax();
  BestEvictCost.MaxWeight = VirtReg.weight();
  MCRegister BestEvicteePhys;

  // Go over all physical registers and find the best candidate for eviction.
  for (MCRegister PhysReg : Order.getOrder()) {

    if (!canEvictInterferenceInRange(VirtReg, PhysReg, Start, End,
                                     BestEvictCost))
      continue;

    // Best so far.
    BestEvicteePhys = PhysReg;
  }
  *BestEvictWeight = BestEvictCost.MaxWeight;
  return BestEvicteePhys;
}

/// evictInterference - Evict any interfering registers that prevent VirtReg
/// from being assigned to Physreg. This assumes that canEvictInterference
/// returned true.
void RAGreedy::evictInterference(LiveInterval &VirtReg, MCRegister PhysReg,
                                 SmallVectorImpl<Register> &NewVRegs) {
  // Make sure that VirtReg has a cascade number, and assign that cascade
  // number to every evicted register. These live ranges can then only be
  // evicted by a newer cascade, preventing infinite loops.
  unsigned Cascade = ExtraRegInfo[VirtReg.reg()].Cascade;
  if (!Cascade)
    Cascade = ExtraRegInfo[VirtReg.reg()].Cascade = NextCascade++;

  LLVM_DEBUG(dbgs() << "evicting " << printReg(PhysReg, TRI)
                    << " interference: Cascade " << Cascade << '\n');

  // Collect all interfering virtregs first.
  SmallVector<LiveInterval*, 8> Intfs;
  for (MCRegUnitIterator Units(PhysReg, TRI); Units.isValid(); ++Units) {
    LiveIntervalUnion::Query &Q = Matrix->query(VirtReg, *Units);
    // We usually have the interfering VRegs cached so collectInterferingVRegs()
    // should be fast. We may need to recalculate when different physregs
    // overlap the same register unit, so different SubRanges have been queried
    // against it.
    Q.collectInterferingVRegs();
    ArrayRef<LiveInterval*> IVR = Q.interferingVRegs();
    Intfs.append(IVR.begin(), IVR.end());
  }

  // Evict them second. This will invalidate the queries.
  for (LiveInterval *Intf : Intfs) {
    // The same VirtReg may be present in multiple RegUnits. Skip duplicates.
    if (!VRM->hasPhys(Intf->reg()))
      continue;

    LastEvicted.addEviction(PhysReg, VirtReg.reg(), Intf->reg());

    Matrix->unassign(*Intf);
    assert((ExtraRegInfo[Intf->reg()].Cascade < Cascade ||
            VirtReg.isSpillable() < Intf->isSpillable()) &&
           "Cannot decrease cascade number, illegal eviction");
    ExtraRegInfo[Intf->reg()].Cascade = Cascade;
    ++NumEvicted;
    NewVRegs.push_back(Intf->reg());
  }
}

/// Returns true if the given \p PhysReg is a callee saved register and has not
/// been used for allocation yet.
bool RAGreedy::isUnusedCalleeSavedReg(MCRegister PhysReg) const {
  MCRegister CSR = RegClassInfo.getLastCalleeSavedAlias(PhysReg);
  if (!CSR)
    return false;

  return !Matrix->isPhysRegUsed(PhysReg);
}

/// tryEvict - Try to evict all interferences for a physreg.
/// @param VirtReg Currently unassigned virtual register.
/// @param Order   Physregs to try.
/// @return        Physreg to assign VirtReg, or 0.
MCRegister RAGreedy::tryEvict(LiveInterval &VirtReg, AllocationOrder &Order,
                              SmallVectorImpl<Register> &NewVRegs,
                              uint8_t CostPerUseLimit,
                              const SmallVirtRegSet &FixedRegisters) {
  NamedRegionTimer T("evict", "Evict", TimerGroupName, TimerGroupDescription,
                     TimePassesIsEnabled);

  // Keep track of the cheapest interference seen so far.
  EvictionCost BestCost;
  BestCost.setMax();
  MCRegister BestPhys;
  unsigned OrderLimit = Order.getOrder().size();

  // When we are just looking for a reduced cost per use, don't break any
  // hints, and only evict smaller spill weights.
  if (CostPerUseLimit < uint8_t(~0u)) {
    BestCost.BrokenHints = 0;
    BestCost.MaxWeight = VirtReg.weight();

    // Check if any registers in RC are below CostPerUseLimit.
    const TargetRegisterClass *RC = MRI->getRegClass(VirtReg.reg());
    uint8_t MinCost = RegClassInfo.getMinCost(RC);
    if (MinCost >= CostPerUseLimit) {
      LLVM_DEBUG(dbgs() << TRI->getRegClassName(RC) << " minimum cost = "
                        << MinCost << ", no cheaper registers to be found.\n");
      return 0;
    }

    // It is normal for register classes to have a long tail of registers with
    // the same cost. We don't need to look at them if they're too expensive.
    if (RegCosts[Order.getOrder().back()] >= CostPerUseLimit) {
      OrderLimit = RegClassInfo.getLastCostChange(RC);
      LLVM_DEBUG(dbgs() << "Only trying the first " << OrderLimit
                        << " regs.\n");
    }
  }

  for (auto I = Order.begin(), E = Order.getOrderLimitEnd(OrderLimit); I != E;
       ++I) {
    MCRegister PhysReg = *I;
    assert(PhysReg);
    if (RegCosts[PhysReg] >= CostPerUseLimit)
      continue;
    // The first use of a callee-saved register in a function has cost 1.
    // Don't start using a CSR when the CostPerUseLimit is low.
    if (CostPerUseLimit == 1 && isUnusedCalleeSavedReg(PhysReg)) {
      LLVM_DEBUG(
          dbgs() << printReg(PhysReg, TRI) << " would clobber CSR "
                 << printReg(RegClassInfo.getLastCalleeSavedAlias(PhysReg), TRI)
                 << '\n');
      continue;
    }

    if (!canEvictInterference(VirtReg, PhysReg, false, BestCost,
                              FixedRegisters))
      continue;

    // Best so far.
    BestPhys = PhysReg;

    // Stop if the hint can be used.
    if (I.isHint())
      break;
  }

  if (BestPhys.isValid())
    evictInterference(VirtReg, BestPhys, NewVRegs);
  return BestPhys;
}

//===----------------------------------------------------------------------===//
//                              Region Splitting
//===----------------------------------------------------------------------===//

/// addSplitConstraints - Fill out the SplitConstraints vector based on the
/// interference pattern in Physreg and its aliases. Add the constraints to
/// SpillPlacement and return the static cost of this split in Cost, assuming
/// that all preferences in SplitConstraints are met.
/// Return false if there are no bundles with positive bias.
bool RAGreedy::addSplitConstraints(InterferenceCache::Cursor Intf,
                                   BlockFrequency &Cost) {
  ArrayRef<SplitAnalysis::BlockInfo> UseBlocks = SA->getUseBlocks();

  // Reset interference dependent info.
  SplitConstraints.resize(UseBlocks.size());
  BlockFrequency StaticCost = 0;
  for (unsigned I = 0; I != UseBlocks.size(); ++I) {
    const SplitAnalysis::BlockInfo &BI = UseBlocks[I];
    SpillPlacement::BlockConstraint &BC = SplitConstraints[I];

    BC.Number = BI.MBB->getNumber();
    Intf.moveToBlock(BC.Number);
    BC.Entry = BI.LiveIn ? SpillPlacement::PrefReg : SpillPlacement::DontCare;
    BC.Exit = (BI.LiveOut &&
               !LIS->getInstructionFromIndex(BI.LastInstr)->isImplicitDef())
                  ? SpillPlacement::PrefReg
                  : SpillPlacement::DontCare;
    BC.ChangesValue = BI.FirstDef.isValid();

    if (!Intf.hasInterference())
      continue;

    // Number of spill code instructions to insert.
    unsigned Ins = 0;

    // Interference for the live-in value.
    if (BI.LiveIn) {
      if (Intf.first() <= Indexes->getMBBStartIdx(BC.Number)) {
        BC.Entry = SpillPlacement::MustSpill;
        ++Ins;
      } else if (Intf.first() < BI.FirstInstr) {
        BC.Entry = SpillPlacement::PrefSpill;
        ++Ins;
      } else if (Intf.first() < BI.LastInstr) {
        ++Ins;
      }

      // Abort if the spill cannot be inserted at the MBB's start.
      if (((BC.Entry == SpillPlacement::MustSpill) ||
           (BC.Entry == SpillPlacement::PrefSpill)) &&
          SlotIndex::isEarlierInstr(BI.FirstInstr,
                                    SA->getFirstSplitPoint(BC.Number)))
        return false;
    }

    // Interference for the live-out value.
    if (BI.LiveOut) {
      if (Intf.last() >= SA->getLastSplitPoint(BC.Number)) {
        BC.Exit = SpillPlacement::MustSpill;
        ++Ins;
      } else if (Intf.last() > BI.LastInstr) {
        BC.Exit = SpillPlacement::PrefSpill;
        ++Ins;
      } else if (Intf.last() > BI.FirstInstr) {
        ++Ins;
      }
    }

    // Accumulate the total frequency of inserted spill code.
    while (Ins--)
      StaticCost += SpillPlacer->getBlockFrequency(BC.Number);
  }
  Cost = StaticCost;

  // Add constraints for use-blocks. Note that these are the only constraints
  // that may add a positive bias, it is downhill from here.
  SpillPlacer->addConstraints(SplitConstraints);
  return SpillPlacer->scanActiveBundles();
}

/// addThroughConstraints - Add constraints and links to SpillPlacer from the
/// live-through blocks in Blocks.
bool RAGreedy::addThroughConstraints(InterferenceCache::Cursor Intf,
                                     ArrayRef<unsigned> Blocks) {
  const unsigned GroupSize = 8;
  SpillPlacement::BlockConstraint BCS[GroupSize];
  unsigned TBS[GroupSize];
  unsigned B = 0, T = 0;

  for (unsigned Number : Blocks) {
    Intf.moveToBlock(Number);

    if (!Intf.hasInterference()) {
      assert(T < GroupSize && "Array overflow");
      TBS[T] = Number;
      if (++T == GroupSize) {
        SpillPlacer->addLinks(makeArrayRef(TBS, T));
        T = 0;
      }
      continue;
    }

    assert(B < GroupSize && "Array overflow");
    BCS[B].Number = Number;

    // Abort if the spill cannot be inserted at the MBB's start.
    MachineBasicBlock *MBB = MF->getBlockNumbered(Number);
    auto FirstNonDebugInstr = MBB->getFirstNonDebugInstr();
    if (FirstNonDebugInstr != MBB->end() &&
        SlotIndex::isEarlierInstr(LIS->getInstructionIndex(*FirstNonDebugInstr),
                                  SA->getFirstSplitPoint(Number)))
      return false;
    // Interference for the live-in value.
    if (Intf.first() <= Indexes->getMBBStartIdx(Number))
      BCS[B].Entry = SpillPlacement::MustSpill;
    else
      BCS[B].Entry = SpillPlacement::PrefSpill;

    // Interference for the live-out value.
    if (Intf.last() >= SA->getLastSplitPoint(Number))
      BCS[B].Exit = SpillPlacement::MustSpill;
    else
      BCS[B].Exit = SpillPlacement::PrefSpill;

    if (++B == GroupSize) {
      SpillPlacer->addConstraints(makeArrayRef(BCS, B));
      B = 0;
    }
  }

  SpillPlacer->addConstraints(makeArrayRef(BCS, B));
  SpillPlacer->addLinks(makeArrayRef(TBS, T));
  return true;
}

bool RAGreedy::growRegion(GlobalSplitCandidate &Cand) {
  // Keep track of through blocks that have not been added to SpillPlacer.
  BitVector Todo = SA->getThroughBlocks();
  SmallVectorImpl<unsigned> &ActiveBlocks = Cand.ActiveBlocks;
  unsigned AddedTo = 0;
#ifndef NDEBUG
  unsigned Visited = 0;
#endif

  while (true) {
    ArrayRef<unsigned> NewBundles = SpillPlacer->getRecentPositive();
    // Find new through blocks in the periphery of PrefRegBundles.
    for (unsigned Bundle : NewBundles) {
      // Look at all blocks connected to Bundle in the full graph.
      ArrayRef<unsigned> Blocks = Bundles->getBlocks(Bundle);
      for (unsigned Block : Blocks) {
        if (!Todo.test(Block))
          continue;
        Todo.reset(Block);
        // This is a new through block. Add it to SpillPlacer later.
        ActiveBlocks.push_back(Block);
#ifndef NDEBUG
        ++Visited;
#endif
      }
    }
    // Any new blocks to add?
    if (ActiveBlocks.size() == AddedTo)
      break;

    // Compute through constraints from the interference, or assume that all
    // through blocks prefer spilling when forming compact regions.
    auto NewBlocks = makeArrayRef(ActiveBlocks).slice(AddedTo);
    if (Cand.PhysReg) {
      if (!addThroughConstraints(Cand.Intf, NewBlocks))
        return false;
    } else
      // Provide a strong negative bias on through blocks to prevent unwanted
      // liveness on loop backedges.
      SpillPlacer->addPrefSpill(NewBlocks, /* Strong= */ true);
    AddedTo = ActiveBlocks.size();

    // Perhaps iterating can enable more bundles?
    SpillPlacer->iterate();
  }
  LLVM_DEBUG(dbgs() << ", v=" << Visited);
  return true;
}

/// calcCompactRegion - Compute the set of edge bundles that should be live
/// when splitting the current live range into compact regions. Compact
/// regions can be computed without looking at interference. They are the
/// regions formed by removing all the live-through blocks from the live range.
///
/// Returns false if the current live range is already compact, or if the
/// compact regions would form single block regions anyway.
bool RAGreedy::calcCompactRegion(GlobalSplitCandidate &Cand) {
  // Without any through blocks, the live range is already compact.
  if (!SA->getNumThroughBlocks())
    return false;

  // Compact regions don't correspond to any physreg.
  Cand.reset(IntfCache, MCRegister::NoRegister);

  LLVM_DEBUG(dbgs() << "Compact region bundles");

  // Use the spill placer to determine the live bundles. GrowRegion pretends
  // that all the through blocks have interference when PhysReg is unset.
  SpillPlacer->prepare(Cand.LiveBundles);

  // The static split cost will be zero since Cand.Intf reports no interference.
  BlockFrequency Cost;
  if (!addSplitConstraints(Cand.Intf, Cost)) {
    LLVM_DEBUG(dbgs() << ", none.\n");
    return false;
  }

  if (!growRegion(Cand)) {
    LLVM_DEBUG(dbgs() << ", cannot spill all interferences.\n");
    return false;
  }

  SpillPlacer->finish();

  if (!Cand.LiveBundles.any()) {
    LLVM_DEBUG(dbgs() << ", none.\n");
    return false;
  }

  LLVM_DEBUG({
    for (int I : Cand.LiveBundles.set_bits())
      dbgs() << " EB#" << I;
    dbgs() << ".\n";
  });
  return true;
}
1470
1471 /// calcSpillCost - Compute how expensive it would be to split the live range in
1472 /// SA around all use blocks instead of forming bundle regions.
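///
/// For example (illustrative, not from the original source): a use block
/// normally costs one reload or store, so it contributes its frequency once;
/// a block that is live-in, live-out, and redefines the value needs both a
/// reload and a store, so its frequency is counted twice.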
1473 BlockFrequency RAGreedy::calcSpillCost() {
1474 BlockFrequency Cost = 0;
1475 ArrayRef<SplitAnalysis::BlockInfo> UseBlocks = SA->getUseBlocks();
1476 for (const SplitAnalysis::BlockInfo &BI : UseBlocks) {
1477 unsigned Number = BI.MBB->getNumber();
1478 // We normally only need one spill instruction - a load or a store.
1479 Cost += SpillPlacer->getBlockFrequency(Number);
1480
1481 // Unless the value is redefined in the block.
1482 if (BI.LiveIn && BI.LiveOut && BI.FirstDef)
1483 Cost += SpillPlacer->getBlockFrequency(Number);
1484 }
1485 return Cost;
1486 }
1487
1488 /// Check if splitting Evictee will create a local split interval in
1489 /// basic block number BBNumber that may cause a bad eviction chain. This is
1490 /// intended to prevent bad eviction sequences like:
1491 /// movl %ebp, 8(%esp) # 4-byte Spill
1492 /// movl %ecx, %ebp
1493 /// movl %ebx, %ecx
1494 /// movl %edi, %ebx
1495 /// movl %edx, %edi
1496 /// cltd
1497 /// idivl %esi
1498 /// movl %edi, %edx
1499 /// movl %ebx, %edi
1500 /// movl %ecx, %ebx
1501 /// movl %ebp, %ecx
1502 /// movl 16(%esp), %ebp # 4 - byte Reload
1503 ///
1504 /// Such sequences are created in 2 scenarios:
1505 ///
1506 /// Scenario #1:
1507 /// %0 is evicted from physreg0 by %1.
1508 /// Evictee %0 is intended for region splitting with split candidate
1509 /// physreg0 (the reg %0 was evicted from).
1510 /// Region splitting creates a local interval because of interference with the
1511 /// evictor %1 (normally region splitting creates 2 intervals, the "by reg"
1512 /// and "by stack" intervals, plus a local interval created when interference
1513 /// occurs).
1514 /// One of the split intervals ends up evicting %2 from physreg1.
1515 /// Evictee %2 is intended for region splitting with split candidate
1516 /// physreg1.
1517 /// One of the split intervals ends up evicting %3 from physreg2, etc.
1518 ///
1519 /// Scenario #2
1520 /// %0 is evicted from physreg0 by %1.
1521 /// %2 is evicted from physreg2 by %3 etc.
1522 /// Evictee %0 is intended for region splitting with split candidate
1523 /// physreg1.
1524 /// Region splitting creates a local interval because of interference with the
1525 /// evictor %1.
1526 /// One of the split intervals ends up evicting back original evictor %1
1527 /// from physreg0 (the reg %0 was evicted from).
1528 /// Another evictee %2 is intended for region splitting with split candidate
1529 /// physreg1.
1530 /// One of the split intervals ends up evicting %3 from physreg2, etc.
1531 ///
1532 /// \param Evictee The register considered to be split.
1533 /// \param Cand The split candidate that determines the physical register
1534 /// we are splitting for and the interferences.
1535 /// \param BBNumber The number of a BB for which the region split process will
1536 /// create a local split interval.
1537 /// \param Order The physical registers that may get evicted by a split
1538 /// artifact of Evictee.
1539 /// \return True if splitting Evictee may cause a bad eviction chain, false
1540 /// otherwise.
1541 bool RAGreedy::splitCanCauseEvictionChain(Register Evictee,
1542 GlobalSplitCandidate &Cand,
1543 unsigned BBNumber,
1544 const AllocationOrder &Order) {
1545 EvictionTrack::EvictorInfo VregEvictorInfo = LastEvicted.getEvictor(Evictee);
1546 unsigned Evictor = VregEvictorInfo.first;
1547 MCRegister PhysReg = VregEvictorInfo.second;
1548
1549 // No actual evictor.
1550 if (!Evictor || !PhysReg)
1551 return false;
1552
1553 float MaxWeight = 0;
1554 MCRegister FutureEvictedPhysReg =
1555 getCheapestEvicteeWeight(Order, LIS->getInterval(Evictee),
1556 Cand.Intf.first(), Cand.Intf.last(), &MaxWeight);
1557
1558 // The bad eviction chain occurs when either the split candidate is the
1559 // evicting reg or one of the split artifacts will evict the evicting reg.
1560 if ((PhysReg != Cand.PhysReg) && (PhysReg != FutureEvictedPhysReg))
1561 return false;
1562
1563 Cand.Intf.moveToBlock(BBNumber);
1564
1565 // Check to see if the Evictor contains interference (with Evictee) in the
1566 // given BB. If so, this interference caused the eviction of Evictee from
1567 // PhysReg. This suggests that we will create a local interval during the
1568 // region split to avoid this interference. This local interval may cause a
1569 // bad eviction chain.
1570 if (!LIS->hasInterval(Evictor))
1571 return false;
1572 LiveInterval &EvictorLI = LIS->getInterval(Evictor);
1573 if (EvictorLI.FindSegmentContaining(Cand.Intf.first()) == EvictorLI.end())
1574 return false;
1575
1576 // Now, check to see if the local interval we will create is going to be
1577 // expensive enough to evict somebody. If so, this may cause a bad eviction
1578 // chain.
1579 float splitArtifactWeight =
1580 VRAI->futureWeight(LIS->getInterval(Evictee),
1581 Cand.Intf.first().getPrevIndex(), Cand.Intf.last());
1582 if (splitArtifactWeight >= 0 && splitArtifactWeight < MaxWeight)
1583 return false;
1584
1585 return true;
1586 }
1587
1588 /// Check if splitting VirtRegToSplit will create a local split interval
1589 /// in basic block number BBNumber that may cause a spill.
1590 ///
1591 /// \param VirtRegToSplit The register considered to be split.
1592 /// \param Cand The split candidate that determines the physical
1593 /// register we are splitting for and the interferences.
1594 /// \param BBNumber The number of a BB for which the region split process
1595 /// will create a local split interval.
1596 /// \param Order The physical registers that may get evicted by a
1597 /// split artifact of VirtRegToSplit.
1598 /// \return True if splitting VirtRegToSplit may cause a spill, false
1599 /// otherwise.
1600 bool RAGreedy::splitCanCauseLocalSpill(unsigned VirtRegToSplit,
1601 GlobalSplitCandidate &Cand,
1602 unsigned BBNumber,
1603 const AllocationOrder &Order) {
1604 Cand.Intf.moveToBlock(BBNumber);
1605
1606 // Check if the local interval will find a non-interfering assignment.
1607 for (auto PhysReg : Order.getOrder()) {
1608 if (!Matrix->checkInterference(Cand.Intf.first().getPrevIndex(),
1609 Cand.Intf.last(), PhysReg))
1610 return false;
1611 }
1612
1613 // The local interval is not able to find a non-interfering assignment
1614 // and not able to evict a less worthy interval; therefore, it can cause a
1615 // spill.
1616 return true;
1617 }
1618
1619 /// calcGlobalSplitCost - Return the global split cost of following the split
1620 /// pattern in LiveBundles. This cost should be added to the local cost of the
1621 /// interference pattern in SplitConstraints.
1622 ///
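/// For example (illustrative, not from the original source): a use block that
/// is live-in in a register while its entry bundle prefers the stack needs a
/// reload, adding its block frequency once; a live-through block that is in a
/// register on both ends but has interference pays the frequency twice, for
/// the spill and reload around the interference.
///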
1623 BlockFrequency RAGreedy::calcGlobalSplitCost(GlobalSplitCandidate &Cand,
1624 const AllocationOrder &Order,
1625 bool *CanCauseEvictionChain) {
1626 BlockFrequency GlobalCost = 0;
1627 const BitVector &LiveBundles = Cand.LiveBundles;
1628 Register VirtRegToSplit = SA->getParent().reg();
1629 ArrayRef<SplitAnalysis::BlockInfo> UseBlocks = SA->getUseBlocks();
1630 for (unsigned I = 0; I != UseBlocks.size(); ++I) {
1631 const SplitAnalysis::BlockInfo &BI = UseBlocks[I];
1632 SpillPlacement::BlockConstraint &BC = SplitConstraints[I];
1633 bool RegIn = LiveBundles[Bundles->getBundle(BC.Number, false)];
1634 bool RegOut = LiveBundles[Bundles->getBundle(BC.Number, true)];
1635 unsigned Ins = 0;
1636
1637 Cand.Intf.moveToBlock(BC.Number);
1638 // Check whether a local interval is going to be created during the region
1639 // split. Calculate the advanced split cost (the cost of local intervals) if
1640 // the option is enabled.
1641 if (EnableAdvancedRASplitCost && Cand.Intf.hasInterference() && BI.LiveIn &&
1642 BI.LiveOut && RegIn && RegOut) {
1643
1644 if (CanCauseEvictionChain &&
1645 splitCanCauseEvictionChain(VirtRegToSplit, Cand, BC.Number, Order)) {
1646 // This interference causes our eviction from this assignment; we might
1647 // evict somebody else and eventually someone will spill. Add that cost.
1648 // See splitCanCauseEvictionChain for detailed description of scenarios.
1649 GlobalCost += SpillPlacer->getBlockFrequency(BC.Number);
1650 GlobalCost += SpillPlacer->getBlockFrequency(BC.Number);
1651
1652 *CanCauseEvictionChain = true;
1653
1654 } else if (splitCanCauseLocalSpill(VirtRegToSplit, Cand, BC.Number,
1655 Order)) {
1656 // This interference causes local interval to spill, add that cost.
1657 GlobalCost += SpillPlacer->getBlockFrequency(BC.Number);
1658 GlobalCost += SpillPlacer->getBlockFrequency(BC.Number);
1659 }
1660 }
1661
1662 if (BI.LiveIn)
1663 Ins += RegIn != (BC.Entry == SpillPlacement::PrefReg);
1664 if (BI.LiveOut)
1665 Ins += RegOut != (BC.Exit == SpillPlacement::PrefReg);
1666 while (Ins--)
1667 GlobalCost += SpillPlacer->getBlockFrequency(BC.Number);
1668 }
1669
1670 for (unsigned Number : Cand.ActiveBlocks) {
1671 bool RegIn = LiveBundles[Bundles->getBundle(Number, false)];
1672 bool RegOut = LiveBundles[Bundles->getBundle(Number, true)];
1673 if (!RegIn && !RegOut)
1674 continue;
1675 if (RegIn && RegOut) {
1676 // We need double spill code if this block has interference.
1677 Cand.Intf.moveToBlock(Number);
1678 if (Cand.Intf.hasInterference()) {
1679 GlobalCost += SpillPlacer->getBlockFrequency(Number);
1680 GlobalCost += SpillPlacer->getBlockFrequency(Number);
1681
1682 // Check whether a local interval is going to be created during the
1683 // region split.
1684 if (EnableAdvancedRASplitCost && CanCauseEvictionChain &&
1685 splitCanCauseEvictionChain(VirtRegToSplit, Cand, Number, Order)) {
1686 // This interference causes our eviction from this assignment; we might
1687 // evict somebody else. Add that cost.
1688 // See splitCanCauseEvictionChain for detailed description of
1689 // scenarios.
1690 GlobalCost += SpillPlacer->getBlockFrequency(Number);
1691 GlobalCost += SpillPlacer->getBlockFrequency(Number);
1692
1693 *CanCauseEvictionChain = true;
1694 }
1695 }
1696 continue;
1697 }
1698 // live-in / stack-out or stack-in live-out.
1699 GlobalCost += SpillPlacer->getBlockFrequency(Number);
1700 }
1701 return GlobalCost;
1702 }
1703
1704 /// splitAroundRegion - Split the current live range around the regions
1705 /// determined by BundleCand and GlobalCand.
1706 ///
1707 /// Before calling this function, GlobalCand and BundleCand must be initialized
1708 /// so each bundle is assigned to a valid candidate, or NoCand for the
1709 /// stack-bound bundles. The shared SA/SE SplitAnalysis and SplitEditor
1710 /// objects must be initialized for the current live range, and intervals
1711 /// created for the used candidates.
1712 ///
1713 /// @param LREdit The LiveRangeEdit object handling the current split.
1714 /// @param UsedCands List of used GlobalCand entries. Every BundleCand value
1715 /// must appear in this list.
1716 void RAGreedy::splitAroundRegion(LiveRangeEdit &LREdit,
1717 ArrayRef<unsigned> UsedCands) {
1718 // These are the intervals created for new global ranges. We may create more
1719 // intervals for local ranges.
1720 const unsigned NumGlobalIntvs = LREdit.size();
1721 LLVM_DEBUG(dbgs() << "splitAroundRegion with " << NumGlobalIntvs
1722 << " globals.\n");
1723 assert(NumGlobalIntvs && "No global intervals configured");
1724
1725 // Isolate even single instructions when dealing with a proper sub-class.
1726 // That guarantees register class inflation for the stack interval because it
1727 // is all copies.
1728 Register Reg = SA->getParent().reg();
1729 bool SingleInstrs = RegClassInfo.isProperSubClass(MRI->getRegClass(Reg));
1730
1731 // First handle all the blocks with uses.
1732 ArrayRef<SplitAnalysis::BlockInfo> UseBlocks = SA->getUseBlocks();
1733 for (const SplitAnalysis::BlockInfo &BI : UseBlocks) {
1734 unsigned Number = BI.MBB->getNumber();
1735 unsigned IntvIn = 0, IntvOut = 0;
1736 SlotIndex IntfIn, IntfOut;
1737 if (BI.LiveIn) {
1738 unsigned CandIn = BundleCand[Bundles->getBundle(Number, false)];
1739 if (CandIn != NoCand) {
1740 GlobalSplitCandidate &Cand = GlobalCand[CandIn];
1741 IntvIn = Cand.IntvIdx;
1742 Cand.Intf.moveToBlock(Number);
1743 IntfIn = Cand.Intf.first();
1744 }
1745 }
1746 if (BI.LiveOut) {
1747 unsigned CandOut = BundleCand[Bundles->getBundle(Number, true)];
1748 if (CandOut != NoCand) {
1749 GlobalSplitCandidate &Cand = GlobalCand[CandOut];
1750 IntvOut = Cand.IntvIdx;
1751 Cand.Intf.moveToBlock(Number);
1752 IntfOut = Cand.Intf.last();
1753 }
1754 }
1755
1756 // Create separate intervals for isolated blocks with multiple uses.
1757 if (!IntvIn && !IntvOut) {
1758 LLVM_DEBUG(dbgs() << printMBBReference(*BI.MBB) << " isolated.\n");
1759 if (SA->shouldSplitSingleBlock(BI, SingleInstrs))
1760 SE->splitSingleBlock(BI);
1761 continue;
1762 }
1763
1764 if (IntvIn && IntvOut)
1765 SE->splitLiveThroughBlock(Number, IntvIn, IntfIn, IntvOut, IntfOut);
1766 else if (IntvIn)
1767 SE->splitRegInBlock(BI, IntvIn, IntfIn);
1768 else
1769 SE->splitRegOutBlock(BI, IntvOut, IntfOut);
1770 }
1771
1772 // Handle live-through blocks. The relevant live-through blocks are stored in
1773 // the ActiveBlocks list with each candidate. We need to filter out
1774 // duplicates.
1775 BitVector Todo = SA->getThroughBlocks();
1776 for (unsigned c = 0; c != UsedCands.size(); ++c) {
1777 ArrayRef<unsigned> Blocks = GlobalCand[UsedCands[c]].ActiveBlocks;
1778 for (unsigned Number : Blocks) {
1779 if (!Todo.test(Number))
1780 continue;
1781 Todo.reset(Number);
1782
1783 unsigned IntvIn = 0, IntvOut = 0;
1784 SlotIndex IntfIn, IntfOut;
1785
1786 unsigned CandIn = BundleCand[Bundles->getBundle(Number, false)];
1787 if (CandIn != NoCand) {
1788 GlobalSplitCandidate &Cand = GlobalCand[CandIn];
1789 IntvIn = Cand.IntvIdx;
1790 Cand.Intf.moveToBlock(Number);
1791 IntfIn = Cand.Intf.first();
1792 }
1793
1794 unsigned CandOut = BundleCand[Bundles->getBundle(Number, true)];
1795 if (CandOut != NoCand) {
1796 GlobalSplitCandidate &Cand = GlobalCand[CandOut];
1797 IntvOut = Cand.IntvIdx;
1798 Cand.Intf.moveToBlock(Number);
1799 IntfOut = Cand.Intf.last();
1800 }
1801 if (!IntvIn && !IntvOut)
1802 continue;
1803 SE->splitLiveThroughBlock(Number, IntvIn, IntfIn, IntvOut, IntfOut);
1804 }
1805 }
1806
1807 ++NumGlobalSplits;
1808
1809 SmallVector<unsigned, 8> IntvMap;
1810 SE->finish(&IntvMap);
1811 DebugVars->splitRegister(Reg, LREdit.regs(), *LIS);
1812
1813 ExtraRegInfo.resize(MRI->getNumVirtRegs());
1814 unsigned OrigBlocks = SA->getNumLiveBlocks();
1815
1816 // Sort out the new intervals created by splitting. We get four kinds:
1817 // - Remainder intervals should not be split again.
1818 // - Candidate intervals can be assigned to Cand.PhysReg.
1819 // - Block-local splits are candidates for local splitting.
1820 // - DCE leftovers should go back on the queue.
1821 for (unsigned I = 0, E = LREdit.size(); I != E; ++I) {
1822 LiveInterval &Reg = LIS->getInterval(LREdit.get(I));
1823
1824 // Ignore old intervals from DCE.
1825 if (getStage(Reg) != RS_New)
1826 continue;
1827
1828 // Remainder interval. Don't try splitting again, spill if it doesn't
1829 // allocate.
1830 if (IntvMap[I] == 0) {
1831 setStage(Reg, RS_Spill);
1832 continue;
1833 }
1834
1835 // Global intervals. Allow repeated splitting as long as the number of live
1836 // blocks is strictly decreasing.
1837 if (IntvMap[I] < NumGlobalIntvs) {
1838 if (SA->countLiveBlocks(&Reg) >= OrigBlocks) {
1839 LLVM_DEBUG(dbgs() << "Main interval covers the same " << OrigBlocks
1840 << " blocks as original.\n");
1841 // Don't allow repeated splitting as a safeguard against looping.
1842 setStage(Reg, RS_Split2);
1843 }
1844 continue;
1845 }
1846
1847 // Other intervals are treated as new. This includes local intervals created
1848 // for blocks with multiple uses, and anything created by DCE.
1849 }
1850
1851 if (VerifyEnabled)
1852 MF->verify(this, "After splitting live range around region");
1853 }
1854
1855 MCRegister RAGreedy::tryRegionSplit(LiveInterval &VirtReg,
1856 AllocationOrder &Order,
1857 SmallVectorImpl<Register> &NewVRegs) {
1858 if (!TRI->shouldRegionSplitForVirtReg(*MF, VirtReg))
1859 return MCRegister::NoRegister;
1860 unsigned NumCands = 0;
1861 BlockFrequency SpillCost = calcSpillCost();
1862 BlockFrequency BestCost;
1863
1864 // Check if we can split this live range around a compact region.
1865 bool HasCompact = calcCompactRegion(GlobalCand.front());
1866 if (HasCompact) {
1867 // Yes, keep GlobalCand[0] as the compact region candidate.
1868 NumCands = 1;
1869 BestCost = BlockFrequency::getMaxFrequency();
1870 } else {
1871 // No benefit from the compact region, our fallback will be per-block
1872 // splitting. Make sure we find a solution that is cheaper than spilling.
1873 BestCost = SpillCost;
1874 LLVM_DEBUG(dbgs() << "Cost of isolating all blocks = ";
1875 MBFI->printBlockFreq(dbgs(), BestCost) << '\n');
1876 }
1877
1878 bool CanCauseEvictionChain = false;
1879 unsigned BestCand =
1880 calculateRegionSplitCost(VirtReg, Order, BestCost, NumCands,
1881 false /*IgnoreCSR*/, &CanCauseEvictionChain);
1882
1883 // Split candidates with compact regions can cause a bad eviction sequence.
1884 // See splitCanCauseEvictionChain for detailed description of scenarios.
1885 // To avoid it, we need to compare the cost with the spill cost and not the
1886 // current max frequency.
1887 if (HasCompact && (BestCost > SpillCost) && (BestCand != NoCand) &&
1888 CanCauseEvictionChain) {
1889 return MCRegister::NoRegister;
1890 }
1891
1892 // No solutions found, fall back to single block splitting.
1893 if (!HasCompact && BestCand == NoCand)
1894 return MCRegister::NoRegister;
1895
1896 return doRegionSplit(VirtReg, BestCand, HasCompact, NewVRegs);
1897 }
1898
1899 unsigned RAGreedy::calculateRegionSplitCost(LiveInterval &VirtReg,
1900 AllocationOrder &Order,
1901 BlockFrequency &BestCost,
1902 unsigned &NumCands, bool IgnoreCSR,
1903 bool *CanCauseEvictionChain) {
1904 unsigned BestCand = NoCand;
1905 for (MCPhysReg PhysReg : Order) {
1906 assert(PhysReg);
1907 if (IgnoreCSR && isUnusedCalleeSavedReg(PhysReg))
1908 continue;
1909
1910 // Discard bad candidates before we run out of interference cache cursors.
1911 // This will only affect register classes with a lot of registers (>32).
1912 if (NumCands == IntfCache.getMaxCursors()) {
1913 unsigned WorstCount = ~0u;
1914 unsigned Worst = 0;
1915 for (unsigned CandIndex = 0; CandIndex != NumCands; ++CandIndex) {
1916 if (CandIndex == BestCand || !GlobalCand[CandIndex].PhysReg)
1917 continue;
1918 unsigned Count = GlobalCand[CandIndex].LiveBundles.count();
1919 if (Count < WorstCount) {
1920 Worst = CandIndex;
1921 WorstCount = Count;
1922 }
1923 }
1924 --NumCands;
1925 GlobalCand[Worst] = GlobalCand[NumCands];
1926 if (BestCand == NumCands)
1927 BestCand = Worst;
1928 }
1929
1930 if (GlobalCand.size() <= NumCands)
1931 GlobalCand.resize(NumCands+1);
1932 GlobalSplitCandidate &Cand = GlobalCand[NumCands];
1933 Cand.reset(IntfCache, PhysReg);
1934
1935 SpillPlacer->prepare(Cand.LiveBundles);
1936 BlockFrequency Cost;
1937 if (!addSplitConstraints(Cand.Intf, Cost)) {
1938 LLVM_DEBUG(dbgs() << printReg(PhysReg, TRI) << "\tno positive bundles\n");
1939 continue;
1940 }
1941 LLVM_DEBUG(dbgs() << printReg(PhysReg, TRI) << "\tstatic = ";
1942 MBFI->printBlockFreq(dbgs(), Cost));
1943 if (Cost >= BestCost) {
1944 LLVM_DEBUG({
1945 if (BestCand == NoCand)
1946 dbgs() << " worse than no bundles\n";
1947 else
1948 dbgs() << " worse than "
1949 << printReg(GlobalCand[BestCand].PhysReg, TRI) << '\n';
1950 });
1951 continue;
1952 }
1953 if (!growRegion(Cand)) {
1954 LLVM_DEBUG(dbgs() << ", cannot spill all interferences.\n");
1955 continue;
1956 }
1957
1958 SpillPlacer->finish();
1959
1960 // No live bundles, defer to splitSingleBlocks().
1961 if (!Cand.LiveBundles.any()) {
1962 LLVM_DEBUG(dbgs() << " no bundles.\n");
1963 continue;
1964 }
1965
1966 bool HasEvictionChain = false;
1967 Cost += calcGlobalSplitCost(Cand, Order, &HasEvictionChain);
1968 LLVM_DEBUG({
1969 dbgs() << ", total = ";
1970 MBFI->printBlockFreq(dbgs(), Cost) << " with bundles";
1971 for (int I : Cand.LiveBundles.set_bits())
1972 dbgs() << " EB#" << I;
1973 dbgs() << ".\n";
1974 });
1975 if (Cost < BestCost) {
1976 BestCand = NumCands;
1977 BestCost = Cost;
1978 // See splitCanCauseEvictionChain for detailed description of bad
1979 // eviction chain scenarios.
1980 if (CanCauseEvictionChain)
1981 *CanCauseEvictionChain = HasEvictionChain;
1982 }
1983 ++NumCands;
1984 }
1985
1986 if (CanCauseEvictionChain && BestCand != NoCand) {
1987 // See splitCanCauseEvictionChain for detailed description of bad
1988 // eviction chain scenarios.
1989 LLVM_DEBUG(dbgs() << "Best split candidate of vreg "
1990 << printReg(VirtReg.reg(), TRI) << " may ");
1991 if (!(*CanCauseEvictionChain))
1992 LLVM_DEBUG(dbgs() << "not ");
1993 LLVM_DEBUG(dbgs() << "cause bad eviction chain\n");
1994 }
1995
1996 return BestCand;
1997 }
1998
1999 unsigned RAGreedy::doRegionSplit(LiveInterval &VirtReg, unsigned BestCand,
2000 bool HasCompact,
2001 SmallVectorImpl<Register> &NewVRegs) {
2002 SmallVector<unsigned, 8> UsedCands;
2003 // Prepare split editor.
2004 LiveRangeEdit LREdit(&VirtReg, NewVRegs, *MF, *LIS, VRM, this, &DeadRemats);
2005 SE->reset(LREdit, SplitSpillMode);
2006
2007 // Assign all edge bundles to the preferred candidate, or NoCand.
2008 BundleCand.assign(Bundles->getNumBundles(), NoCand);
2009
2010 // Assign bundles for the best candidate region.
2011 if (BestCand != NoCand) {
2012 GlobalSplitCandidate &Cand = GlobalCand[BestCand];
2013 if (unsigned B = Cand.getBundles(BundleCand, BestCand)) {
2014 UsedCands.push_back(BestCand);
2015 Cand.IntvIdx = SE->openIntv();
2016 LLVM_DEBUG(dbgs() << "Split for " << printReg(Cand.PhysReg, TRI) << " in "
2017 << B << " bundles, intv " << Cand.IntvIdx << ".\n");
2018 (void)B;
2019 }
2020 }
2021
2022 // Assign bundles for the compact region.
2023 if (HasCompact) {
2024 GlobalSplitCandidate &Cand = GlobalCand.front();
2025 assert(!Cand.PhysReg && "Compact region has no physreg");
2026 if (unsigned B = Cand.getBundles(BundleCand, 0)) {
2027 UsedCands.push_back(0);
2028 Cand.IntvIdx = SE->openIntv();
2029 LLVM_DEBUG(dbgs() << "Split for compact region in " << B
2030 << " bundles, intv " << Cand.IntvIdx << ".\n");
2031 (void)B;
2032 }
2033 }
2034
2035 splitAroundRegion(LREdit, UsedCands);
2036 return 0;
2037 }
2038
2039 //===----------------------------------------------------------------------===//
2040 // Per-Block Splitting
2041 //===----------------------------------------------------------------------===//
2042
2043 /// tryBlockSplit - Split a global live range around every block with uses. This
2044 /// creates a lot of local live ranges that will be split by tryLocalSplit if
2045 /// they don't allocate.
2046 unsigned RAGreedy::tryBlockSplit(LiveInterval &VirtReg, AllocationOrder &Order,
2047 SmallVectorImpl<Register> &NewVRegs) {
2048 assert(&SA->getParent() == &VirtReg && "Live range wasn't analyzed");
2049 Register Reg = VirtReg.reg();
2050 bool SingleInstrs = RegClassInfo.isProperSubClass(MRI->getRegClass(Reg));
2051 LiveRangeEdit LREdit(&VirtReg, NewVRegs, *MF, *LIS, VRM, this, &DeadRemats);
2052 SE->reset(LREdit, SplitSpillMode);
2053 ArrayRef<SplitAnalysis::BlockInfo> UseBlocks = SA->getUseBlocks();
2054 for (const SplitAnalysis::BlockInfo &BI : UseBlocks) {
2055 if (SA->shouldSplitSingleBlock(BI, SingleInstrs))
2056 SE->splitSingleBlock(BI);
2057 }
2058 // No blocks were split.
2059 if (LREdit.empty())
2060 return 0;
2061
2062 // We did split for some blocks.
2063 SmallVector<unsigned, 8> IntvMap;
2064 SE->finish(&IntvMap);
2065
2066 // Tell LiveDebugVariables about the new ranges.
2067 DebugVars->splitRegister(Reg, LREdit.regs(), *LIS);
2068
2069 ExtraRegInfo.resize(MRI->getNumVirtRegs());
2070
2071 // Sort out the new intervals created by splitting. The remainder interval
2072 // goes straight to spilling, the new local ranges get to stay RS_New.
2073 for (unsigned I = 0, E = LREdit.size(); I != E; ++I) {
2074 LiveInterval &LI = LIS->getInterval(LREdit.get(I));
2075 if (getStage(LI) == RS_New && IntvMap[I] == 0)
2076 setStage(LI, RS_Spill);
2077 }
2078
2079 if (VerifyEnabled)
2080 MF->verify(this, "After splitting live range around basic blocks");
2081 return 0;
2082 }
2083
2084 //===----------------------------------------------------------------------===//
2085 // Per-Instruction Splitting
2086 //===----------------------------------------------------------------------===//
2087
2088 /// Get the number of allocatable registers that match the constraints of \p Reg
2089 /// on \p MI and that are also in \p SuperRC.
2090 static unsigned getNumAllocatableRegsForConstraints(
2091 const MachineInstr *MI, Register Reg, const TargetRegisterClass *SuperRC,
2092 const TargetInstrInfo *TII, const TargetRegisterInfo *TRI,
2093 const RegisterClassInfo &RCI) {
2094 assert(SuperRC && "Invalid register class");
2095
2096 const TargetRegisterClass *ConstrainedRC =
2097 MI->getRegClassConstraintEffectForVReg(Reg, SuperRC, TII, TRI,
2098 /* ExploreBundle */ true);
2099 if (!ConstrainedRC)
2100 return 0;
2101 return RCI.getNumAllocatableRegs(ConstrainedRC);
2102 }
2103
2104 /// tryInstructionSplit - Split a live range around individual instructions.
2105 /// This is normally not worthwhile since the spiller is doing essentially the
2106 /// same thing. However, when the live range is in a constrained register
2107 /// class, it may help to insert copies such that parts of the live range can
2108 /// be moved to a larger register class.
2109 ///
2110 /// This is similar to spilling to a larger register class.
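///
/// For example (illustrative, not from the original source): on x86, one
/// instruction may constrain a virtual register to GR32_ABCD; splitting around
/// that instruction lets the rest of the live range inflate to the larger
/// GR32 class.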
2111 unsigned
2112 RAGreedy::tryInstructionSplit(LiveInterval &VirtReg, AllocationOrder &Order,
2113 SmallVectorImpl<Register> &NewVRegs) {
2114 const TargetRegisterClass *CurRC = MRI->getRegClass(VirtReg.reg());
2115 // There is no point to this if there are no larger sub-classes.
2116 if (!RegClassInfo.isProperSubClass(CurRC))
2117 return 0;
2118
2119 // Always enable split spill mode, since we're effectively spilling to a
2120 // register.
2121 LiveRangeEdit LREdit(&VirtReg, NewVRegs, *MF, *LIS, VRM, this, &DeadRemats);
2122 SE->reset(LREdit, SplitEditor::SM_Size);
2123
2124 ArrayRef<SlotIndex> Uses = SA->getUseSlots();
2125 if (Uses.size() <= 1)
2126 return 0;
2127
2128 LLVM_DEBUG(dbgs() << "Split around " << Uses.size()
2129 << " individual instrs.\n");
2130
2131 const TargetRegisterClass *SuperRC =
2132 TRI->getLargestLegalSuperClass(CurRC, *MF);
2133 unsigned SuperRCNumAllocatableRegs = RCI.getNumAllocatableRegs(SuperRC);
2134 // Split around every non-copy instruction if this split will relax
2135 // the constraints on the virtual register.
2136 // Otherwise, splitting just inserts uncoalescable copies that do not help
2137 // the allocation.
2138 for (const auto &Use : Uses) {
2139 if (const MachineInstr *MI = Indexes->getInstructionFromIndex(Use))
2140 if (MI->isFullCopy() ||
2141 SuperRCNumAllocatableRegs ==
2142 getNumAllocatableRegsForConstraints(MI, VirtReg.reg(), SuperRC,
2143 TII, TRI, RCI)) {
2144 LLVM_DEBUG(dbgs() << " skip:\t" << Use << '\t' << *MI);
2145 continue;
2146 }
2147 SE->openIntv();
2148 SlotIndex SegStart = SE->enterIntvBefore(Use);
2149 SlotIndex SegStop = SE->leaveIntvAfter(Use);
2150 SE->useIntv(SegStart, SegStop);
2151 }
2152
2153 if (LREdit.empty()) {
2154 LLVM_DEBUG(dbgs() << "All uses were copies.\n");
2155 return 0;
2156 }
2157
2158 SmallVector<unsigned, 8> IntvMap;
2159 SE->finish(&IntvMap);
2160 DebugVars->splitRegister(VirtReg.reg(), LREdit.regs(), *LIS);
2161 ExtraRegInfo.resize(MRI->getNumVirtRegs());
2162
2163 // Assign all new registers to RS_Spill. This was the last chance.
2164 setStage(LREdit.begin(), LREdit.end(), RS_Spill);
2165 return 0;
2166 }
2167
2168 //===----------------------------------------------------------------------===//
2169 // Local Splitting
2170 //===----------------------------------------------------------------------===//
2171
2172 /// calcGapWeights - Compute the maximum spill weight that needs to be evicted
2173 /// in order to use PhysReg between two entries in SA->UseSlots.
2174 ///
2175 /// GapWeight[I] represents the gap between UseSlots[I] and UseSlots[I + 1].
2176 ///
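/// For example (illustrative, not from the original source): with three use
/// slots {A, B, C} there are two gaps, GapWeight[0] for A-B and GapWeight[1]
/// for B-C; interference that overlaps the instruction at B is counted in
/// both gaps.
///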
2177 void RAGreedy::calcGapWeights(MCRegister PhysReg,
2178 SmallVectorImpl<float> &GapWeight) {
2179 assert(SA->getUseBlocks().size() == 1 && "Not a local interval");
2180 const SplitAnalysis::BlockInfo &BI = SA->getUseBlocks().front();
2181 ArrayRef<SlotIndex> Uses = SA->getUseSlots();
2182 const unsigned NumGaps = Uses.size()-1;
2183
2184 // Start and end points for the interference check.
2185 SlotIndex StartIdx =
2186 BI.LiveIn ? BI.FirstInstr.getBaseIndex() : BI.FirstInstr;
2187 SlotIndex StopIdx =
2188 BI.LiveOut ? BI.LastInstr.getBoundaryIndex() : BI.LastInstr;
2189
2190 GapWeight.assign(NumGaps, 0.0f);
2191
2192 // Add interference from each overlapping register.
2193 for (MCRegUnitIterator Units(PhysReg, TRI); Units.isValid(); ++Units) {
2194 if (!Matrix->query(const_cast<LiveInterval&>(SA->getParent()), *Units)
2195 .checkInterference())
2196 continue;
2197
2198 // We know that VirtReg is a continuous interval from FirstInstr to
2199 // LastInstr, so we don't need InterferenceQuery.
2200 //
2201 // Interference that overlaps an instruction is counted in both gaps
2202 // surrounding the instruction. The exception is interference before
2203 // StartIdx and after StopIdx.
2204 //
2205 LiveIntervalUnion::SegmentIter IntI =
2206 Matrix->getLiveUnions()[*Units].find(StartIdx);
2207 for (unsigned Gap = 0; IntI.valid() && IntI.start() < StopIdx; ++IntI) {
2208 // Skip the gaps before IntI.
2209 while (Uses[Gap+1].getBoundaryIndex() < IntI.start())
2210 if (++Gap == NumGaps)
2211 break;
2212 if (Gap == NumGaps)
2213 break;
2214
2215 // Update the gaps covered by IntI.
2216 const float weight = IntI.value()->weight();
2217 for (; Gap != NumGaps; ++Gap) {
2218 GapWeight[Gap] = std::max(GapWeight[Gap], weight);
2219 if (Uses[Gap+1].getBaseIndex() >= IntI.stop())
2220 break;
2221 }
2222 if (Gap == NumGaps)
2223 break;
2224 }
2225 }
2226
2227 // Add fixed interference.
2228 for (MCRegUnitIterator Units(PhysReg, TRI); Units.isValid(); ++Units) {
2229 const LiveRange &LR = LIS->getRegUnit(*Units);
2230 LiveRange::const_iterator I = LR.find(StartIdx);
2231 LiveRange::const_iterator E = LR.end();
2232
2233 // Same loop as above. Mark any overlapped gaps as HUGE_VALF.
2234 for (unsigned Gap = 0; I != E && I->start < StopIdx; ++I) {
2235 while (Uses[Gap+1].getBoundaryIndex() < I->start)
2236 if (++Gap == NumGaps)
2237 break;
2238 if (Gap == NumGaps)
2239 break;
2240
2241 for (; Gap != NumGaps; ++Gap) {
2242 GapWeight[Gap] = huge_valf;
2243 if (Uses[Gap+1].getBaseIndex() >= I->end)
2244 break;
2245 }
2246 if (Gap == NumGaps)
2247 break;
2248 }
2249 }
2250 }
2251
2252 /// tryLocalSplit - Try to split VirtReg into smaller intervals inside its only
2253 /// basic block.
2254 ///
2255 unsigned RAGreedy::tryLocalSplit(LiveInterval &VirtReg, AllocationOrder &Order,
2256 SmallVectorImpl<Register> &NewVRegs) {
2257 // TODO: the function currently only handles a single UseBlock; it should be
2258 // possible to generalize.
2259 if (SA->getUseBlocks().size() != 1)
2260 return 0;
2261
2262 const SplitAnalysis::BlockInfo &BI = SA->getUseBlocks().front();
2263
2264 // Note that it is possible to have an interval that is live-in or live-out
2265 // while only covering a single block - A phi-def can use undef values from
2266 // predecessors, and the block could be a single-block loop.
2267 // We don't bother doing anything clever about such a case, we simply assume
2268 // that the interval is continuous from FirstInstr to LastInstr. We should
2269 // make sure that we don't do anything illegal to such an interval, though.
2270
2271 ArrayRef<SlotIndex> Uses = SA->getUseSlots();
2272 if (Uses.size() <= 2)
2273 return 0;
2274 const unsigned NumGaps = Uses.size()-1;
2275
2276 LLVM_DEBUG({
2277 dbgs() << "tryLocalSplit: ";
2278 for (const auto &Use : Uses)
2279 dbgs() << ' ' << Use;
2280 dbgs() << '\n';
2281 });
2282
2283 // If VirtReg is live across any register mask operands, compute a list of
2284 // gaps with register masks.
2285 SmallVector<unsigned, 8> RegMaskGaps;
2286 if (Matrix->checkRegMaskInterference(VirtReg)) {
2287 // Get regmask slots for the whole block.
2288 ArrayRef<SlotIndex> RMS = LIS->getRegMaskSlotsInBlock(BI.MBB->getNumber());
2289 LLVM_DEBUG(dbgs() << RMS.size() << " regmasks in block:");
2290 // Constrain to VirtReg's live range.
2291 unsigned RI =
2292 llvm::lower_bound(RMS, Uses.front().getRegSlot()) - RMS.begin();
2293 unsigned RE = RMS.size();
2294 for (unsigned I = 0; I != NumGaps && RI != RE; ++I) {
2295 // Look for Uses[I] <= RMS <= Uses[I + 1].
2296 assert(!SlotIndex::isEarlierInstr(RMS[RI], Uses[I]));
2297 if (SlotIndex::isEarlierInstr(Uses[I + 1], RMS[RI]))
2298 continue;
2299 // Skip a regmask on the same instruction as the last use. It doesn't
2300 // overlap the live range.
2301 if (SlotIndex::isSameInstr(Uses[I + 1], RMS[RI]) && I + 1 == NumGaps)
2302 break;
2303 LLVM_DEBUG(dbgs() << ' ' << RMS[RI] << ':' << Uses[I] << '-'
2304 << Uses[I + 1]);
2305 RegMaskGaps.push_back(I);
2306 // Advance RI to the next gap. A regmask on one of the uses counts in
2307 // both gaps.
2308 while (RI != RE && SlotIndex::isEarlierInstr(RMS[RI], Uses[I + 1]))
2309 ++RI;
2310 }
2311 LLVM_DEBUG(dbgs() << '\n');
2312 }
2313
2314 // Since we allow local split results to be split again, there is a risk of
2315 // creating infinite loops. It is tempting to require that the new live
2316 // ranges have fewer instructions than the original. That would guarantee
2317 // convergence, but it is too strict. A live range with 3 instructions can be
2318 // split 2+3 (including the COPY), and we want to allow that.
2319 //
2320 // Instead we use these rules:
2321 //
2322 // 1. Allow any split for ranges with getStage() < RS_Split2. (Except for the
2323 // noop split, of course).
2324 // 2. Require progress be made for ranges with getStage() == RS_Split2. All
2325 // the new ranges must have fewer instructions than before the split.
2326 // 3. New ranges with the same number of instructions are marked RS_Split2,
2327 // smaller ranges are marked RS_New.
2328 //
2329 // These rules allow a 3 -> 2+3 split once, which we need. They also prevent
2330 // excessive splitting and infinite loops.
2331 //
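// Illustrative example (not from the original source): with 3 uses there are
// NumGaps == 2 gaps. A candidate covering the first two uses (SplitBefore ==
// 0, SplitAfter == 1) of a live-in, live-out range gives NewGaps ==
// LiveBefore + SplitAfter - SplitBefore + LiveAfter == 1 + 1 - 0 + 1 == 3,
// which is not fewer than NumGaps, so rule 2 rejects it once the range has
// reached RS_Split2.
//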
2332 bool ProgressRequired = getStage(VirtReg) >= RS_Split2;
2333
2334 // Best split candidate.
2335 unsigned BestBefore = NumGaps;
2336 unsigned BestAfter = 0;
2337 float BestDiff = 0;
2338
2339 const float blockFreq =
2340 SpillPlacer->getBlockFrequency(BI.MBB->getNumber()).getFrequency() *
2341 (1.0f / MBFI->getEntryFreq());
2342 SmallVector<float, 8> GapWeight;
2343
2344 for (MCPhysReg PhysReg : Order) {
2345 assert(PhysReg);
2346 // Keep track of the largest spill weight that would need to be evicted in
2347 // order to make use of PhysReg between UseSlots[I] and UseSlots[I + 1].
2348 calcGapWeights(PhysReg, GapWeight);
2349
2350 // Remove any gaps with regmask clobbers.
2351 if (Matrix->checkRegMaskInterference(VirtReg, PhysReg))
2352 for (unsigned I = 0, E = RegMaskGaps.size(); I != E; ++I)
2353 GapWeight[RegMaskGaps[I]] = huge_valf;
2354
2355 // Try to find the best sequence of gaps to close.
2356 // The new spill weight must be larger than any gap interference.
2357
2358 // We will split before Uses[SplitBefore] and after Uses[SplitAfter].
2359 unsigned SplitBefore = 0, SplitAfter = 1;
2360
2361 // MaxGap should always be max(GapWeight[SplitBefore..SplitAfter-1]).
2362 // It is the spill weight that needs to be evicted.
2363 float MaxGap = GapWeight[0];
2364
2365 while (true) {
2366 // Live before/after split?
2367 const bool LiveBefore = SplitBefore != 0 || BI.LiveIn;
2368 const bool LiveAfter = SplitAfter != NumGaps || BI.LiveOut;
2369
2370 LLVM_DEBUG(dbgs() << printReg(PhysReg, TRI) << ' ' << Uses[SplitBefore]
2371 << '-' << Uses[SplitAfter] << " I=" << MaxGap);
2372
2373 // Stop before the interval gets so big we wouldn't be making progress.
2374 if (!LiveBefore && !LiveAfter) {
2375 LLVM_DEBUG(dbgs() << " all\n");
2376 break;
2377 }
2378 // Should the interval be extended or shrunk?
2379 bool Shrink = true;
2380
2381 // How many gaps would the new range have?
2382 unsigned NewGaps = LiveBefore + SplitAfter - SplitBefore + LiveAfter;
2383
2384 // Legally, without causing looping?
2385 bool Legal = !ProgressRequired || NewGaps < NumGaps;
2386
2387 if (Legal && MaxGap < huge_valf) {
2388 // Estimate the new spill weight. Each instruction reads or writes the
2389 // register. Conservatively assume there are no read-modify-write
2390 // instructions.
2391 //
2392 // Try to guess the size of the new interval.
2393 const float EstWeight = normalizeSpillWeight(
2394 blockFreq * (NewGaps + 1),
2395 Uses[SplitBefore].distance(Uses[SplitAfter]) +
2396 (LiveBefore + LiveAfter) * SlotIndex::InstrDist,
2397 1);
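// Illustrative reading (not from the original source): the estimate counts
// NewGaps + 1 instructions at the entry-relative block frequency, over a
// size equal to the distance between the split points plus one InstrDist
// per live-in/live-out boundary.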
2398 // Would this split be possible to allocate?
2399 // Never allocate all gaps, we wouldn't be making progress.
2400 LLVM_DEBUG(dbgs() << " w=" << EstWeight);
2401 if (EstWeight * Hysteresis >= MaxGap) {
2402 Shrink = false;
2403 float Diff = EstWeight - MaxGap;
2404 if (Diff > BestDiff) {
2405 LLVM_DEBUG(dbgs() << " (best)");
2406 BestDiff = Hysteresis * Diff;
2407 BestBefore = SplitBefore;
2408 BestAfter = SplitAfter;
2409 }
2410 }
2411 }
2412
2413 // Try to shrink.
2414 if (Shrink) {
2415 if (++SplitBefore < SplitAfter) {
2416 LLVM_DEBUG(dbgs() << " shrink\n");
2417 // Recompute the max when necessary.
2418 if (GapWeight[SplitBefore - 1] >= MaxGap) {
2419 MaxGap = GapWeight[SplitBefore];
2420 for (unsigned I = SplitBefore + 1; I != SplitAfter; ++I)
2421 MaxGap = std::max(MaxGap, GapWeight[I]);
2422 }
2423 continue;
2424 }
2425 MaxGap = 0;
2426 }
2427
2428 // Try to extend the interval.
2429 if (SplitAfter >= NumGaps) {
2430 LLVM_DEBUG(dbgs() << " end\n");
2431 break;
2432 }
2433
2434 LLVM_DEBUG(dbgs() << " extend\n");
2435 MaxGap = std::max(MaxGap, GapWeight[SplitAfter++]);
2436 }
2437 }
2438
2439 // Didn't find any candidates?
2440 if (BestBefore == NumGaps)
2441 return 0;
2442
2443 LLVM_DEBUG(dbgs() << "Best local split range: " << Uses[BestBefore] << '-'
2444 << Uses[BestAfter] << ", " << BestDiff << ", "
2445 << (BestAfter - BestBefore + 1) << " instrs\n");
2446
2447 LiveRangeEdit LREdit(&VirtReg, NewVRegs, *MF, *LIS, VRM, this, &DeadRemats);
2448 SE->reset(LREdit);
2449
2450 SE->openIntv();
2451 SlotIndex SegStart = SE->enterIntvBefore(Uses[BestBefore]);
2452 SlotIndex SegStop = SE->leaveIntvAfter(Uses[BestAfter]);
2453 SE->useIntv(SegStart, SegStop);
2454 SmallVector<unsigned, 8> IntvMap;
2455 SE->finish(&IntvMap);
2456 DebugVars->splitRegister(VirtReg.reg(), LREdit.regs(), *LIS);
2457
2458 // If the new range has the same number of instructions as before, mark it as
2459 // RS_Split2 so the next split will be forced to make progress. Otherwise,
2460 // leave the new intervals as RS_New so they can compete.
2461 bool LiveBefore = BestBefore != 0 || BI.LiveIn;
2462 bool LiveAfter = BestAfter != NumGaps || BI.LiveOut;
2463 unsigned NewGaps = LiveBefore + BestAfter - BestBefore + LiveAfter;
2464 if (NewGaps >= NumGaps) {
2465 LLVM_DEBUG(dbgs() << "Tagging non-progress ranges: ");
2466 assert(!ProgressRequired && "Didn't make progress when it was required.");
2467 for (unsigned I = 0, E = IntvMap.size(); I != E; ++I)
2468 if (IntvMap[I] == 1) {
2469 setStage(LIS->getInterval(LREdit.get(I)), RS_Split2);
2470 LLVM_DEBUG(dbgs() << printReg(LREdit.get(I)));
2471 }
2472 LLVM_DEBUG(dbgs() << '\n');
2473 }
2474 ++NumLocalSplits;
2475
2476 return 0;
2477 }
2478
2479 //===----------------------------------------------------------------------===//
2480 // Live Range Splitting
2481 //===----------------------------------------------------------------------===//
2482
2483 /// trySplit - Try to split VirtReg or one of its interferences, making it
2484 /// assignable.
2485 /// @return Physreg when VirtReg may be assigned and/or new NewVRegs.
2486 unsigned RAGreedy::trySplit(LiveInterval &VirtReg, AllocationOrder &Order,
2487 SmallVectorImpl<Register> &NewVRegs,
2488 const SmallVirtRegSet &FixedRegisters) {
2489 // Ranges must be Split2 or less.
2490 if (getStage(VirtReg) >= RS_Spill)
2491 return 0;
2492
2493 // Local intervals are handled separately.
2494 if (LIS->intervalIsInOneMBB(VirtReg)) {
2495 NamedRegionTimer T("local_split", "Local Splitting", TimerGroupName,
2496 TimerGroupDescription, TimePassesIsEnabled);
2497 SA->analyze(&VirtReg);
2498 Register PhysReg = tryLocalSplit(VirtReg, Order, NewVRegs);
2499 if (PhysReg || !NewVRegs.empty())
2500 return PhysReg;
2501 return tryInstructionSplit(VirtReg, Order, NewVRegs);
2502 }
2503
2504 NamedRegionTimer T("global_split", "Global Splitting", TimerGroupName,
2505 TimerGroupDescription, TimePassesIsEnabled);
2506
2507 SA->analyze(&VirtReg);
2508
2509 // FIXME: SplitAnalysis may repair broken live ranges coming from the
2510 // coalescer. That may cause the range to become allocatable which means that
2511 // tryRegionSplit won't be making progress. This check should be replaced with
2512 // an assertion when the coalescer is fixed.
2513 if (SA->didRepairRange()) {
2514 // VirtReg has changed, so all cached queries are invalid.
2515 Matrix->invalidateVirtRegs();
2516 if (Register PhysReg = tryAssign(VirtReg, Order, NewVRegs, FixedRegisters))
2517 return PhysReg;
2518 }
2519
2520 // First try to split around a region spanning multiple blocks. RS_Split2
2521 // ranges already made dubious progress with region splitting, so they go
2522 // straight to single block splitting.
2523 if (getStage(VirtReg) < RS_Split2) {
2524 MCRegister PhysReg = tryRegionSplit(VirtReg, Order, NewVRegs);
2525 if (PhysReg || !NewVRegs.empty())
2526 return PhysReg;
2527 }
2528
2529 // Then isolate blocks.
2530 return tryBlockSplit(VirtReg, Order, NewVRegs);
2531 }
2532
2533 //===----------------------------------------------------------------------===//
2534 // Last Chance Recoloring
2535 //===----------------------------------------------------------------------===//
2536
2537 /// Return true if \p reg has any tied def operand.
2538 static bool hasTiedDef(MachineRegisterInfo *MRI, unsigned reg) {
2539 for (const MachineOperand &MO : MRI->def_operands(reg))
2540 if (MO.isTied())
2541 return true;
2542
2543 return false;
2544 }
2545
2546 /// mayRecolorAllInterferences - Check if the virtual registers that
2547 /// interfere with \p VirtReg on \p PhysReg (or one of its aliases) may be
2548 /// recolored to free \p PhysReg.
2549 /// When true is returned, \p RecoloringCandidates has been augmented with all
2550 /// the live intervals that need to be recolored in order to free \p PhysReg
2551 /// for \p VirtReg.
2552 /// \p FixedRegisters contains all the virtual registers that cannot be
2553 /// recolored.
2554 bool RAGreedy::mayRecolorAllInterferences(
2555 MCRegister PhysReg, LiveInterval &VirtReg, SmallLISet &RecoloringCandidates,
2556 const SmallVirtRegSet &FixedRegisters) {
2557 const TargetRegisterClass *CurRC = MRI->getRegClass(VirtReg.reg());
2558
2559 for (MCRegUnitIterator Units(PhysReg, TRI); Units.isValid(); ++Units) {
2560 LiveIntervalUnion::Query &Q = Matrix->query(VirtReg, *Units);
2561 // If there are LastChanceRecoloringMaxInterference or more interferences,
2562 // chances are one would not be recolorable.
2563 if (Q.collectInterferingVRegs(LastChanceRecoloringMaxInterference) >=
2564 LastChanceRecoloringMaxInterference && !ExhaustiveSearch) {
2565 LLVM_DEBUG(dbgs() << "Early abort: too many interferences.\n");
2566 CutOffInfo |= CO_Interf;
2567 return false;
2568 }
2569 for (LiveInterval *Intf : reverse(Q.interferingVRegs())) {
2570 // If Intf is done and sits on the same register class as VirtReg,
2571 // it would not be recolorable as it is in the same state as VirtReg.
2572 // However, if VirtReg has tied defs and Intf doesn't, then
2573 // there is still a point in examining if it can be recolorable.
2574 if (((getStage(*Intf) == RS_Done &&
2575 MRI->getRegClass(Intf->reg()) == CurRC) &&
2576 !(hasTiedDef(MRI, VirtReg.reg()) &&
2577 !hasTiedDef(MRI, Intf->reg()))) ||
2578 FixedRegisters.count(Intf->reg())) {
2579 LLVM_DEBUG(
2580 dbgs() << "Early abort: the interference is not recolorable.\n");
2581 return false;
2582 }
2583 RecoloringCandidates.insert(Intf);
2584 }
2585 }
2586 return true;
2587 }
2588
2589 /// tryLastChanceRecoloring - Try to assign a color to \p VirtReg by recoloring
2590 /// its interferences.
2591 /// Last chance recoloring chooses a color for \p VirtReg and recolors every
2592 /// virtual register that was using it. The recoloring process may recursively
2593 /// use the last chance recoloring. Therefore, when a virtual register has been
2594 /// assigned a color by this mechanism, it is marked as Fixed, i.e., it cannot
2595 /// be last-chance-recolored again during this recoloring "session".
2596 /// E.g.,
2597 /// Let
2598 /// vA can use {R1, R2 }
2599 /// vB can use { R2, R3}
2600 /// vC can use {R1 }
2601 /// Where vA, vB, and vC cannot be split anymore (they are reloads for
2602 /// instance) and they all interfere.
2603 ///
2604 /// vA is assigned R1
2605 /// vB is assigned R2
2606 /// vC tries to evict vA but vA is already done.
2607 /// Regular register allocation fails.
2608 ///
2609 /// Last chance recoloring kicks in:
2610 /// vC does as if vA was evicted => vC uses R1.
2611 /// vC is marked as fixed.
2612 /// vA needs to find a color.
2613 /// None are available.
2614 /// vA cannot evict vC: vC is a fixed virtual register now.
2615 /// vA does as if vB was evicted => vA uses R2.
2616 /// vB needs to find a color.
2617 /// R3 is available.
2618 /// Recoloring => vC = R1, vA = R2, vB = R3
2619 ///
2620 /// \p Order defines the preferred allocation order for \p VirtReg.
2621 /// \p NewRegs will contain any new virtual registers that have been created
2622 /// (split, spill) during the process and that must be assigned.
2623 /// \p FixedRegisters contains all the virtual registers that cannot be
2624 /// recolored.
2625 /// \p Depth gives the current depth of the last chance recoloring.
2626 /// \return a physical register that can be used for VirtReg or ~0u if none
2627 /// exists.
2628 unsigned RAGreedy::tryLastChanceRecoloring(LiveInterval &VirtReg,
2629 AllocationOrder &Order,
2630 SmallVectorImpl<Register> &NewVRegs,
2631 SmallVirtRegSet &FixedRegisters,
2632 unsigned Depth) {
2633 if (!TRI->shouldUseLastChanceRecoloringForVirtReg(*MF, VirtReg))
2634 return ~0u;
2635
2636 LLVM_DEBUG(dbgs() << "Try last chance recoloring for " << VirtReg << '\n');
2637 // Ranges must be Done.
2638 assert((getStage(VirtReg) >= RS_Done || !VirtReg.isSpillable()) &&
2639 "Last chance recoloring should really be last chance");
2640 // Set the max depth to LastChanceRecoloringMaxDepth.
2641 // We may want to reconsider that if we end up with too large a search space
2642 // for targets with hundreds of registers.
2643 // Indeed, in that case we may want to cut the search space earlier.
2644 if (Depth >= LastChanceRecoloringMaxDepth && !ExhaustiveSearch) {
2645 LLVM_DEBUG(dbgs() << "Abort because max depth has been reached.\n");
2646 CutOffInfo |= CO_Depth;
2647 return ~0u;
2648 }
2649
2650 // Set of Live intervals that will need to be recolored.
2651 SmallLISet RecoloringCandidates;
2652 // Record the original mapping virtual register to physical register in case
2653 // the recoloring fails.
2654 DenseMap<Register, MCRegister> VirtRegToPhysReg;
2655 // Mark VirtReg as fixed, i.e., it will not be recolored past this point in
2656 // this recoloring "session".
2657 assert(!FixedRegisters.count(VirtReg.reg()));
2658 FixedRegisters.insert(VirtReg.reg());
2659 SmallVector<Register, 4> CurrentNewVRegs;
2660
2661 for (MCRegister PhysReg : Order) {
2662 assert(PhysReg.isValid());
2663 LLVM_DEBUG(dbgs() << "Try to assign: " << VirtReg << " to "
2664 << printReg(PhysReg, TRI) << '\n');
2665 RecoloringCandidates.clear();
2666 VirtRegToPhysReg.clear();
2667 CurrentNewVRegs.clear();
2668
2669 // It is only possible to recolor virtual register interference.
2670 if (Matrix->checkInterference(VirtReg, PhysReg) >
2671 LiveRegMatrix::IK_VirtReg) {
2672 LLVM_DEBUG(
2673 dbgs() << "Some interferences are not with virtual registers.\n");
2674
2675 continue;
2676 }
2677
2678 // Give up early on this PhysReg if it is obvious we cannot recolor all
2679 // the interferences.
2680 if (!mayRecolorAllInterferences(PhysReg, VirtReg, RecoloringCandidates,
2681 FixedRegisters)) {
2682 LLVM_DEBUG(dbgs() << "Some interferences cannot be recolored.\n");
2683 continue;
2684 }
2685
2686 // RecoloringCandidates contains all the virtual registers that interfere
2687 // with VirtReg on PhysReg (or one of its aliases).
2688 // Enqueue them for recoloring and perform the actual recoloring.
2689 PQueue RecoloringQueue;
2690 for (LiveInterval *RC : RecoloringCandidates) {
2691 Register ItVirtReg = RC->reg();
2692 enqueue(RecoloringQueue, RC);
2693 assert(VRM->hasPhys(ItVirtReg) &&
2694 "Interferences are supposed to be with allocated variables");
2695
2696 // Record the current allocation.
2697 VirtRegToPhysReg[ItVirtReg] = VRM->getPhys(ItVirtReg);
2698 // Unset the related struct.
2699 Matrix->unassign(*RC);
2700 }
2701
2702 // Do as if VirtReg was assigned to PhysReg so that the underlying
2703 // recoloring has the right information about the interferences and
2704 // available colors.
2705 Matrix->assign(VirtReg, PhysReg);
2706
2707 // Save the current recoloring state.
2708 // If we cannot recolor all the interferences, we will have to start again
2709 // at this point for the next physical register.
2710 SmallVirtRegSet SaveFixedRegisters(FixedRegisters);
2711 if (tryRecoloringCandidates(RecoloringQueue, CurrentNewVRegs,
2712 FixedRegisters, Depth)) {
2713 // Push the queued vregs into the main queue.
2714 for (Register NewVReg : CurrentNewVRegs)
2715 NewVRegs.push_back(NewVReg);
2716 // Do not mess up the global assignment process.
2717 // I.e., VirtReg must be unassigned.
2718 Matrix->unassign(VirtReg);
2719 return PhysReg;
2720 }
2721
2722 LLVM_DEBUG(dbgs() << "Fail to assign: " << VirtReg << " to "
2723 << printReg(PhysReg, TRI) << '\n');
2724
2725 // The recoloring attempt failed, undo the changes.
2726 FixedRegisters = SaveFixedRegisters;
2727 Matrix->unassign(VirtReg);
2728
2729 // For a newly created vreg which is also in RecoloringCandidates,
2730 // don't add it to NewVRegs because its physical register will be restored
2731 // below. Other vregs in CurrentNewVRegs are created by calling
2732 // selectOrSplit and should be added into NewVRegs.
2733 for (Register &R : CurrentNewVRegs) {
2734 if (RecoloringCandidates.count(&LIS->getInterval(R)))
2735 continue;
2736 NewVRegs.push_back(R);
2737 }
2738
2739 for (LiveInterval *RC : RecoloringCandidates) {
2740 Register ItVirtReg = RC->reg();
2741 if (VRM->hasPhys(ItVirtReg))
2742 Matrix->unassign(*RC);
2743 MCRegister ItPhysReg = VirtRegToPhysReg[ItVirtReg];
2744 Matrix->assign(*RC, ItPhysReg);
2745 }
2746 }
2747
2748 // Last chance recoloring did not work either, give up.
2749 return ~0u;
2750 }
2751
2752 /// tryRecoloringCandidates - Try to assign a new color to every register
2753 /// in \p RecoloringQueue.
2754 /// \p NewRegs will contain any new virtual register created during the
2755 /// recoloring process.
2756 /// \p FixedRegisters[in/out] contains all the registers that have been
2757 /// recolored.
2758 /// \return true if all virtual registers in RecoloringQueue were successfully
2759 /// recolored, false otherwise.
2760 bool RAGreedy::tryRecoloringCandidates(PQueue &RecoloringQueue,
2761 SmallVectorImpl<Register> &NewVRegs,
2762 SmallVirtRegSet &FixedRegisters,
2763 unsigned Depth) {
2764 while (!RecoloringQueue.empty()) {
2765 LiveInterval *LI = dequeue(RecoloringQueue);
2766 LLVM_DEBUG(dbgs() << "Try to recolor: " << *LI << '\n');
2767 MCRegister PhysReg =
2768 selectOrSplitImpl(*LI, NewVRegs, FixedRegisters, Depth + 1);
2769 // When splitting happens, the live-range may actually be empty.
2770 // In that case, it is okay to continue the recoloring even
2771 // if we did not find an alternative color for it. Indeed,
2772 // there will not be anything to color for LI in the end.
2773 if (PhysReg == ~0u || (!PhysReg && !LI->empty()))
2774 return false;
2775
2776 if (!PhysReg) {
2777 assert(LI->empty() && "Only an empty live-range does not require a register");
2778 LLVM_DEBUG(dbgs() << "Recoloring of " << *LI
2779 << " succeeded. Empty LI.\n");
2780 continue;
2781 }
2782 LLVM_DEBUG(dbgs() << "Recoloring of " << *LI
2783 << " succeeded with: " << printReg(PhysReg, TRI) << '\n');
2784
2785 Matrix->assign(*LI, PhysReg);
2786 FixedRegisters.insert(LI->reg());
2787 }
2788 return true;
2789 }
2790
2791 //===----------------------------------------------------------------------===//
2792 // Main Entry Point
2793 //===----------------------------------------------------------------------===//
2794
2795 MCRegister RAGreedy::selectOrSplit(LiveInterval &VirtReg,
2796 SmallVectorImpl<Register> &NewVRegs) {
2797 CutOffInfo = CO_None;
2798 LLVMContext &Ctx = MF->getFunction().getContext();
2799 SmallVirtRegSet FixedRegisters;
2800 MCRegister Reg = selectOrSplitImpl(VirtReg, NewVRegs, FixedRegisters);
2801 if (Reg == ~0U && (CutOffInfo != CO_None)) {
2802 uint8_t CutOffEncountered = CutOffInfo & (CO_Depth | CO_Interf);
2803 if (CutOffEncountered == CO_Depth)
2804 Ctx.emitError("register allocation failed: maximum depth for recoloring "
2805 "reached. Use -fexhaustive-register-search to skip "
2806 "cutoffs");
2807 else if (CutOffEncountered == CO_Interf)
2808 Ctx.emitError("register allocation failed: maximum interference for "
2809 "recoloring reached. Use -fexhaustive-register-search "
2810 "to skip cutoffs");
2811 else if (CutOffEncountered == (CO_Depth | CO_Interf))
2812 Ctx.emitError("register allocation failed: maximum interference and "
2813 "depth for recoloring reached. Use "
2814 "-fexhaustive-register-search to skip cutoffs");
2815 }
2816 return Reg;
2817 }
2818
2819 /// Using a CSR for the first time has a cost because it causes push|pop
2820 /// to be added to prologue|epilogue. Splitting a cold section of the live
2821 /// range can have lower cost than using the CSR for the first time;
2822 /// Spilling a live range in the cold path can have lower cost than using
2823 /// the CSR for the first time. Returns the physical register if we decide
2824 /// to use the CSR; otherwise return 0.
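///
/// For example (illustrative, not from the original source): a spillable range
/// in the RS_Spill stage whose calcSpillCost() is below CSRCost is spilled
/// rather than claiming the callee-saved register and paying for the push/pop
/// pair in the prologue/epilogue.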
2825 MCRegister
2826 RAGreedy::tryAssignCSRFirstTime(LiveInterval &VirtReg, AllocationOrder &Order,
2827 MCRegister PhysReg, uint8_t &CostPerUseLimit,
2828 SmallVectorImpl<Register> &NewVRegs) {
2829 if (getStage(VirtReg) == RS_Spill && VirtReg.isSpillable()) {
2830 // We choose spill over using the CSR for the first time if the spill cost
2831 // is lower than CSRCost.
2832 SA->analyze(&VirtReg);
2833 if (calcSpillCost() >= CSRCost)
2834 return PhysReg;
2835
2836 // We are going to spill, set CostPerUseLimit to 1 to make sure that
2837 // we will not use a callee-saved register in tryEvict.
2838 CostPerUseLimit = 1;
2839 return 0;
2840 }
2841 if (getStage(VirtReg) < RS_Split) {
2842 // We choose pre-splitting over using the CSR for the first time if
2843 // the cost of splitting is lower than CSRCost.
2844 SA->analyze(&VirtReg);
2845 unsigned NumCands = 0;
2846 BlockFrequency BestCost = CSRCost; // Don't modify CSRCost.
2847 unsigned BestCand = calculateRegionSplitCost(VirtReg, Order, BestCost,
2848 NumCands, true /*IgnoreCSR*/);
2849 if (BestCand == NoCand)
2850 // Use the CSR if we can't find a region split below CSRCost.
2851 return PhysReg;
2852
2853 // Perform the actual pre-splitting.
2854 doRegionSplit(VirtReg, BestCand, false/*HasCompact*/, NewVRegs);
2855 return 0;
2856 }
2857 return PhysReg;
2858 }

void RAGreedy::aboutToRemoveInterval(LiveInterval &LI) {
  // Do not keep invalid information around.
  SetOfBrokenHints.remove(&LI);
}

void RAGreedy::initializeCSRCost() {
  // We use the larger of the command-line option and the value reported
  // by TRI.
  CSRCost = BlockFrequency(
      std::max((unsigned)CSRFirstTimeCost, TRI->getCSRFirstUseCost()));
  if (!CSRCost.getFrequency())
    return;

  // Raw cost is relative to Entry == 2^14; scale it appropriately.
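  // Worked example with illustrative numbers: if the raw CSRCost maps to a
  // frequency of 80 and MBFI reports an entry frequency of 2^13, the scale
  // factor below is 2^13 / 2^14 = 1/2, so the effective CSRCost becomes 40.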
  uint64_t ActualEntry = MBFI->getEntryFreq();
  if (!ActualEntry) {
    CSRCost = 0;
    return;
  }
  uint64_t FixedEntry = 1 << 14;
  if (ActualEntry < FixedEntry)
    CSRCost *= BranchProbability(ActualEntry, FixedEntry);
  else if (ActualEntry <= UINT32_MAX)
    // Invert the fraction and divide.
    CSRCost /= BranchProbability(FixedEntry, ActualEntry);
  else
    // Can't use BranchProbability in general, since it takes 32-bit numbers.
    CSRCost = CSRCost.getFrequency() * (ActualEntry / FixedEntry);
}

/// Collect the hint info for \p Reg.
/// The results are stored into \p Out.
/// \p Out is not cleared before being populated.
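///
/// For example, when visiting a full copy '%1 = COPY %0' while collecting
/// hints for %0, the other end is %1, and the recorded HintInfo pairs %1
/// (and its current physical assignment, if any) with the frequency of the
/// block containing the copy.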
void RAGreedy::collectHintInfo(Register Reg, HintsInfo &Out) {
  for (const MachineInstr &Instr : MRI->reg_nodbg_instructions(Reg)) {
    if (!Instr.isFullCopy())
      continue;
    // Look for the other end of the copy.
    Register OtherReg = Instr.getOperand(0).getReg();
    if (OtherReg == Reg) {
      OtherReg = Instr.getOperand(1).getReg();
      if (OtherReg == Reg)
        continue;
    }
    // Get the current assignment.
    MCRegister OtherPhysReg =
        OtherReg.isPhysical() ? OtherReg.asMCReg() : VRM->getPhys(OtherReg);
    // Push the collected information.
    Out.push_back(HintInfo(MBFI->getBlockFreq(Instr.getParent()), OtherReg,
                           OtherPhysReg));
  }
}

/// Using the given \p List, compute the cost of the broken hints if
/// \p PhysReg was used.
/// \return The cost of \p List for \p PhysReg.
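///
/// Illustrative example: for List = {(freq 10, %a, $r0), (freq 4, %b, $r1)}
/// and PhysReg = $r0, only the second hint would be broken, so the returned
/// cost is the block frequency 4.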
BlockFrequency RAGreedy::getBrokenHintFreq(const HintsInfo &List,
                                           MCRegister PhysReg) {
  BlockFrequency Cost = 0;
  for (const HintInfo &Info : List) {
    if (Info.PhysReg != PhysReg)
      Cost += Info.Freq;
  }
  return Cost;
}

/// Using the register assigned to \p VirtReg, try to recolor
/// all the live ranges that are copy-related with \p VirtReg.
/// The recoloring is then propagated to all the live-ranges that have
/// been recolored, and so on, until no more copies can be coalesced or
/// it is not profitable.
/// For a given live range, profitability is determined by the sum of the
/// frequencies of the non-identity copies it would introduce with the old
/// and new register.
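///
/// Sketch of the profitability test with made-up numbers: suppose %v is
/// currently in $r1 and its copy-related neighbors are colored $r0. If the
/// copies broken by staying in $r1 have a summed frequency of 12, while
/// recoloring %v to $r0 would leave only frequency 3 worth of broken
/// copies, the move is profitable and %v is reassigned to $r0.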
void RAGreedy::tryHintRecoloring(LiveInterval &VirtReg) {
  // We have a broken hint, check if it is possible to fix it by
  // reusing PhysReg for the copy-related live-ranges. Indeed, we evicted
  // some register and PhysReg may be available for the other live-ranges.
  SmallSet<Register, 4> Visited;
  SmallVector<unsigned, 2> RecoloringCandidates;
  HintsInfo Info;
  Register Reg = VirtReg.reg();
  MCRegister PhysReg = VRM->getPhys(Reg);
  // Start the recoloring algorithm from the input live-interval, then
  // it will propagate to the ones that are copy-related with it.
  Visited.insert(Reg);
  RecoloringCandidates.push_back(Reg);

  LLVM_DEBUG(dbgs() << "Trying to reconcile hints for: " << printReg(Reg, TRI)
                    << '(' << printReg(PhysReg, TRI) << ")\n");

  do {
    Reg = RecoloringCandidates.pop_back_val();

    // We cannot recolor physical registers.
    if (Register::isPhysicalRegister(Reg))
      continue;

    // This may be a skipped register class.
    if (!VRM->hasPhys(Reg)) {
      assert(!ShouldAllocateClass(*TRI, *MRI->getRegClass(Reg)) &&
             "We have an unallocated variable which should have been handled");
      continue;
    }

    // Get the live interval mapped with this virtual register to be able
    // to check for the interference with the new color.
    LiveInterval &LI = LIS->getInterval(Reg);
    MCRegister CurrPhys = VRM->getPhys(Reg);
    // Check that the new color matches the register class constraints and
    // that it is free for this live range.
    if (CurrPhys != PhysReg && (!MRI->getRegClass(Reg)->contains(PhysReg) ||
                                Matrix->checkInterference(LI, PhysReg)))
      continue;

    LLVM_DEBUG(dbgs() << printReg(Reg, TRI) << '(' << printReg(CurrPhys, TRI)
                      << ") is recolorable.\n");

    // Gather the hint info.
    Info.clear();
    collectHintInfo(Reg, Info);
    // Check if recoloring the live-range will increase the cost of the
    // non-identity copies.
    if (CurrPhys != PhysReg) {
      LLVM_DEBUG(dbgs() << "Checking profitability:\n");
      BlockFrequency OldCopiesCost = getBrokenHintFreq(Info, CurrPhys);
      BlockFrequency NewCopiesCost = getBrokenHintFreq(Info, PhysReg);
      LLVM_DEBUG(dbgs() << "Old Cost: " << OldCopiesCost.getFrequency()
                        << "\nNew Cost: " << NewCopiesCost.getFrequency()
                        << '\n');
      if (OldCopiesCost < NewCopiesCost) {
        LLVM_DEBUG(dbgs() << "=> Not profitable.\n");
        continue;
      }
      // At this point, the cost is either cheaper or equal. If it is
      // equal, we consider this profitable because it may expose
      // more recoloring opportunities.
      LLVM_DEBUG(dbgs() << "=> Profitable.\n");
      // Recolor the live-range.
      Matrix->unassign(LI);
      Matrix->assign(LI, PhysReg);
    }
    // Push all copy-related live-ranges to keep reconciling the broken
    // hints.
    for (const HintInfo &HI : Info) {
      if (Visited.insert(HI.Reg).second)
        RecoloringCandidates.push_back(HI.Reg);
    }
  } while (!RecoloringCandidates.empty());
}

/// Try to recolor broken hints.
/// Broken hints may be repaired by recoloring when an evicted variable
/// freed up a register for a larger live-range.
/// Consider the following example:
/// BB1:
///   a =
///   b =
/// BB2:
///   ...
///   = b
///   = a
/// Let us assume b gets split:
/// BB1:
///   a =
///   b =
/// BB2:
///   c = b
///   ...
///   d = c
///   = d
///   = a
/// Because of how the allocation works, b, c, and d may be assigned
/// different colors. Now, if a gets evicted later:
/// BB1:
///   a =
///   st a, SpillSlot
///   b =
/// BB2:
///   c = b
///   ...
///   d = c
///   = d
///   e = ld SpillSlot
///   = e
/// It is likely that we can assign the same register for b, c, and d,
/// getting rid of 2 copies.
void RAGreedy::tryHintsRecoloring() {
  for (LiveInterval *LI : SetOfBrokenHints) {
    assert(Register::isVirtualRegister(LI->reg()) &&
           "Recoloring is possible only for virtual registers");
    // Some dead defs may be around (e.g., because of debug uses).
    // Ignore those.
    if (!VRM->hasPhys(LI->reg()))
      continue;
    tryHintRecoloring(*LI);
  }
}

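/// Work through the allocation strategies for \p VirtReg in increasing
/// order of cost: try to assign a free register, then to evict a less
/// worthy interference, then to split, and finally to spill (or, when
/// spilling is not an option, to attempt last-chance recoloring).
/// Roughly:
///   assign -> evict -> defer to 2nd round -> split -> recolor or spill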
MCRegister RAGreedy::selectOrSplitImpl(LiveInterval &VirtReg,
                                       SmallVectorImpl<Register> &NewVRegs,
                                       SmallVirtRegSet &FixedRegisters,
                                       unsigned Depth) {
  uint8_t CostPerUseLimit = uint8_t(~0u);
  // First try assigning a free register.
  auto Order =
      AllocationOrder::create(VirtReg.reg(), *VRM, RegClassInfo, Matrix);
  if (MCRegister PhysReg =
          tryAssign(VirtReg, Order, NewVRegs, FixedRegisters)) {
    // If VirtReg got an assignment, the eviction info is no longer relevant.
    LastEvicted.clearEvicteeInfo(VirtReg.reg());
    // When NewVRegs is not empty, we may have made decisions such as evicting
    // a virtual register; go with the earlier decisions and use the physical
    // register.
    if (CSRCost.getFrequency() && isUnusedCalleeSavedReg(PhysReg) &&
        NewVRegs.empty()) {
      MCRegister CSRReg = tryAssignCSRFirstTime(VirtReg, Order, PhysReg,
                                                CostPerUseLimit, NewVRegs);
      if (CSRReg || !NewVRegs.empty())
        // Return now if we decide to use a CSR or create new vregs due to
        // pre-splitting.
        return CSRReg;
    } else
      return PhysReg;
  }

  LiveRangeStage Stage = getStage(VirtReg);
  LLVM_DEBUG(dbgs() << StageName[Stage] << " Cascade "
                    << ExtraRegInfo[VirtReg.reg()].Cascade << '\n');

  // Try to evict a less worthy live range, but only for ranges from the
  // primary queue. The RS_Split ranges already failed to do this, and they
  // should not get a second chance until they have been split.
  if (Stage != RS_Split)
    if (Register PhysReg =
            tryEvict(VirtReg, Order, NewVRegs, CostPerUseLimit,
                     FixedRegisters)) {
      Register Hint = MRI->getSimpleHint(VirtReg.reg());
      // If VirtReg has a hint and that hint is broken, record this
      // virtual register as a recoloring candidate for broken hints.
      // Indeed, since we evicted a variable in its neighborhood it is
      // likely we can at least partially recolor some of the
      // copy-related live-ranges.
      if (Hint && Hint != PhysReg)
        SetOfBrokenHints.insert(&VirtReg);
      // If VirtReg evicted someone, the eviction info for it as an evictee
      // is no longer relevant.
      LastEvicted.clearEvicteeInfo(VirtReg.reg());
      return PhysReg;
    }

  assert((NewVRegs.empty() || Depth) && "Cannot append to existing NewVRegs");

  // The first time we see a live range, don't try to split or spill.
  // Wait until the second time, when all smaller ranges have been allocated.
  // This gives a better picture of the interference to split around.
  if (Stage < RS_Split) {
    setStage(VirtReg, RS_Split);
    LLVM_DEBUG(dbgs() << "wait for second round\n");
    NewVRegs.push_back(VirtReg.reg());
    return 0;
  }

  if (Stage < RS_Spill) {
    // Try splitting VirtReg or interferences.
    unsigned NewVRegSizeBefore = NewVRegs.size();
    Register PhysReg = trySplit(VirtReg, Order, NewVRegs, FixedRegisters);
    if (PhysReg || (NewVRegs.size() - NewVRegSizeBefore)) {
      // If VirtReg got split, the eviction info is no longer relevant.
      LastEvicted.clearEvicteeInfo(VirtReg.reg());
      return PhysReg;
    }
  }

  // If we couldn't allocate a register from spilling, there is probably some
  // invalid inline assembly. The base class will report it.
  if (Stage >= RS_Done || !VirtReg.isSpillable())
    return tryLastChanceRecoloring(VirtReg, Order, NewVRegs, FixedRegisters,
                                   Depth);

  // Finally spill VirtReg itself.
  if ((EnableDeferredSpilling ||
       TRI->shouldUseDeferredSpillingForVirtReg(*MF, VirtReg)) &&
      getStage(VirtReg) < RS_Memory) {
    // TODO: This is experimental and in particular, we do not model
    // the live range splitting done by spilling correctly.
    // We would need a deep integration with the spiller to do the
    // right thing here. Anyway, that is still good for early testing.
    setStage(VirtReg, RS_Memory);
    LLVM_DEBUG(dbgs() << "Do as if this register is in memory\n");
    NewVRegs.push_back(VirtReg.reg());
  } else {
    NamedRegionTimer T("spill", "Spiller", TimerGroupName,
                       TimerGroupDescription, TimePassesIsEnabled);
    LiveRangeEdit LRE(&VirtReg, NewVRegs, *MF, *LIS, VRM, this, &DeadRemats);
    spiller().spill(LRE);
    setStage(NewVRegs.begin(), NewVRegs.end(), RS_Done);

    // Tell LiveDebugVariables about the new ranges. Ranges not being covered
    // by the new regs are kept in LDV (still mapping to the old register),
    // until we rewrite spilled locations in LDV at a later stage.
    DebugVars->splitRegister(VirtReg.reg(), LRE.regs(), *LIS);

    if (VerifyEnabled)
      MF->verify(this, "After spilling");
  }

  // The live virtual register requesting allocation was spilled, so tell
  // the caller not to allocate anything during this round.
  return 0;
}

void RAGreedy::RAGreedyStats::report(MachineOptimizationRemarkMissed &R) {
  using namespace ore;
  if (Spills) {
    R << NV("NumSpills", Spills) << " spills ";
    R << NV("TotalSpillsCost", SpillsCost) << " total spills cost ";
  }
  if (FoldedSpills) {
    R << NV("NumFoldedSpills", FoldedSpills) << " folded spills ";
    R << NV("TotalFoldedSpillsCost", FoldedSpillsCost)
      << " total folded spills cost ";
  }
  if (Reloads) {
    R << NV("NumReloads", Reloads) << " reloads ";
    R << NV("TotalReloadsCost", ReloadsCost) << " total reloads cost ";
  }
  if (FoldedReloads) {
    R << NV("NumFoldedReloads", FoldedReloads) << " folded reloads ";
    R << NV("TotalFoldedReloadsCost", FoldedReloadsCost)
      << " total folded reloads cost ";
  }
  if (ZeroCostFoldedReloads)
    R << NV("NumZeroCostFoldedReloads", ZeroCostFoldedReloads)
      << " zero cost folded reloads ";
  if (Copies) {
    R << NV("NumVRCopies", Copies) << " virtual register copies ";
    R << NV("TotalCopiesCost", CopiesCost) << " total copies cost ";
  }
}

RAGreedy::RAGreedyStats RAGreedy::computeStats(MachineBasicBlock &MBB) {
  RAGreedyStats Stats;
  const MachineFrameInfo &MFI = MF->getFrameInfo();
  int FI;

  auto isSpillSlotAccess = [&MFI](const MachineMemOperand *A) {
    return MFI.isSpillSlotObjectIndex(cast<FixedStackPseudoSourceValue>(
        A->getPseudoValue())->getFrameIndex());
  };
  auto isPatchpointInstr = [](const MachineInstr &MI) {
    return MI.getOpcode() == TargetOpcode::PATCHPOINT ||
           MI.getOpcode() == TargetOpcode::STACKMAP ||
           MI.getOpcode() == TargetOpcode::STATEPOINT;
  };
  for (MachineInstr &MI : MBB) {
    if (MI.isCopy()) {
      MachineOperand &Dest = MI.getOperand(0);
      MachineOperand &Src = MI.getOperand(1);
      if (Dest.isReg() && Src.isReg() && Dest.getReg().isVirtual() &&
          Src.getReg().isVirtual())
        ++Stats.Copies;
      continue;
    }

    SmallVector<const MachineMemOperand *, 2> Accesses;
    if (TII->isLoadFromStackSlot(MI, FI) && MFI.isSpillSlotObjectIndex(FI)) {
      ++Stats.Reloads;
      continue;
    }
    if (TII->isStoreToStackSlot(MI, FI) && MFI.isSpillSlotObjectIndex(FI)) {
      ++Stats.Spills;
      continue;
    }
    if (TII->hasLoadFromStackSlot(MI, Accesses) &&
        llvm::any_of(Accesses, isSpillSlotAccess)) {
      if (!isPatchpointInstr(MI)) {
        Stats.FoldedReloads += Accesses.size();
        continue;
      }
      // For statepoints there may be both folded and zero-cost folded stack
      // reloads.
      std::pair<unsigned, unsigned> NonZeroCostRange =
          TII->getPatchpointUnfoldableRange(MI);
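      // Illustrative example: for a STATEPOINT whose unfoldable operand
      // range is [4, 7), a spill-slot operand at index 5 counts as a
      // (costly) folded reload, while a slot referenced only at index 9
      // counts as a zero-cost folded reload.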
      SmallSet<unsigned, 16> FoldedReloads;
      SmallSet<unsigned, 16> ZeroCostFoldedReloads;
      for (unsigned Idx = 0, E = MI.getNumOperands(); Idx < E; ++Idx) {
        MachineOperand &MO = MI.getOperand(Idx);
        if (!MO.isFI() || !MFI.isSpillSlotObjectIndex(MO.getIndex()))
          continue;
        if (Idx >= NonZeroCostRange.first && Idx < NonZeroCostRange.second)
          FoldedReloads.insert(MO.getIndex());
        else
          ZeroCostFoldedReloads.insert(MO.getIndex());
      }
      // If a stack slot is used in a folded reload, it is not zero cost.
      for (unsigned Slot : FoldedReloads)
        ZeroCostFoldedReloads.erase(Slot);
      Stats.FoldedReloads += FoldedReloads.size();
      Stats.ZeroCostFoldedReloads += ZeroCostFoldedReloads.size();
      continue;
    }
    Accesses.clear();
    if (TII->hasStoreToStackSlot(MI, Accesses) &&
        llvm::any_of(Accesses, isSpillSlotAccess)) {
      Stats.FoldedSpills += Accesses.size();
    }
  }
  // Weight each collected statistic by the frequency of this basic block
  // relative to the entry block.
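  // E.g., a reload in a block that is ten times as frequent as the entry
  // block contributes 10.0 to ReloadsCost, while the same reload in the
  // entry block itself would contribute 1.0.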
  float RelFreq = MBFI->getBlockFreqRelativeToEntryBlock(&MBB);
  Stats.ReloadsCost = RelFreq * Stats.Reloads;
  Stats.FoldedReloadsCost = RelFreq * Stats.FoldedReloads;
  Stats.SpillsCost = RelFreq * Stats.Spills;
  Stats.FoldedSpillsCost = RelFreq * Stats.FoldedSpills;
  Stats.CopiesCost = RelFreq * Stats.Copies;
  return Stats;
}

RAGreedy::RAGreedyStats RAGreedy::reportStats(MachineLoop *L) {
  RAGreedyStats Stats;

  // Sum up the spills and reloads in subloops.
  for (MachineLoop *SubLoop : *L)
    Stats.add(reportStats(SubLoop));

  for (MachineBasicBlock *MBB : L->getBlocks())
    // Handle blocks that were not included in subloops.
    if (Loops->getLoopFor(MBB) == L)
      Stats.add(computeStats(*MBB));

  if (!Stats.isEmpty()) {
    using namespace ore;

    ORE->emit([&]() {
      MachineOptimizationRemarkMissed R(DEBUG_TYPE, "LoopSpillReloadCopies",
                                        L->getStartLoc(), L->getHeader());
      Stats.report(R);
      R << "generated in loop";
      return R;
    });
  }
  return Stats;
}

void RAGreedy::reportStats() {
  if (!ORE->allowExtraAnalysis(DEBUG_TYPE))
    return;
  RAGreedyStats Stats;
  for (MachineLoop *L : *Loops)
    Stats.add(reportStats(L));
  // Process non-loop blocks.
  for (MachineBasicBlock &MBB : *MF)
    if (!Loops->getLoopFor(&MBB))
      Stats.add(computeStats(MBB));
  if (!Stats.isEmpty()) {
    using namespace ore;

    ORE->emit([&]() {
      DebugLoc Loc;
      if (auto *SP = MF->getFunction().getSubprogram())
        Loc = DILocation::get(SP->getContext(), SP->getLine(), 1, SP);
      MachineOptimizationRemarkMissed R(DEBUG_TYPE, "SpillReloadCopies", Loc,
                                        &MF->front());
      Stats.report(R);
      R << "generated in function";
      return R;
    });
  }
}

bool RAGreedy::runOnMachineFunction(MachineFunction &mf) {
  LLVM_DEBUG(dbgs() << "********** GREEDY REGISTER ALLOCATION **********\n"
                    << "********** Function: " << mf.getName() << '\n');

  MF = &mf;
  TRI = MF->getSubtarget().getRegisterInfo();
  TII = MF->getSubtarget().getInstrInfo();
  RCI.runOnMachineFunction(mf);

  EnableLocalReassign = EnableLocalReassignment ||
                        MF->getSubtarget().enableRALocalReassignment(
                            MF->getTarget().getOptLevel());

  EnableAdvancedRASplitCost =
      ConsiderLocalIntervalCost.getNumOccurrences()
          ? ConsiderLocalIntervalCost
          : MF->getSubtarget().enableAdvancedRASplitCost();

  if (VerifyEnabled)
    MF->verify(this, "Before greedy register allocator");

  RegAllocBase::init(getAnalysis<VirtRegMap>(),
                     getAnalysis<LiveIntervals>(),
                     getAnalysis<LiveRegMatrix>());
  Indexes = &getAnalysis<SlotIndexes>();
  MBFI = &getAnalysis<MachineBlockFrequencyInfo>();
  DomTree = &getAnalysis<MachineDominatorTree>();
  ORE = &getAnalysis<MachineOptimizationRemarkEmitterPass>().getORE();
  Loops = &getAnalysis<MachineLoopInfo>();
  Bundles = &getAnalysis<EdgeBundles>();
  SpillPlacer = &getAnalysis<SpillPlacement>();
  DebugVars = &getAnalysis<LiveDebugVariables>();
  AA = &getAnalysis<AAResultsWrapperPass>().getAAResults();

  initializeCSRCost();

  RegCosts = TRI->getRegisterCosts(*MF);

  VRAI = std::make_unique<VirtRegAuxInfo>(*MF, *LIS, *VRM, *Loops, *MBFI);
  SpillerInstance.reset(createInlineSpiller(*this, *MF, *VRM, *VRAI));

  VRAI->calculateSpillWeightsAndHints();

  LLVM_DEBUG(LIS->dump());

  SA.reset(new SplitAnalysis(*VRM, *LIS, *Loops));
  SE.reset(new SplitEditor(*SA, *AA, *LIS, *VRM, *DomTree, *MBFI, *VRAI));
  ExtraRegInfo.clear();
  ExtraRegInfo.resize(MRI->getNumVirtRegs());
  NextCascade = 1;
  IntfCache.init(MF, Matrix->getLiveUnions(), Indexes, LIS, TRI);
  GlobalCand.resize(32); // This will grow as needed.
  SetOfBrokenHints.clear();
  LastEvicted.clear();

  allocatePhysRegs();
  tryHintsRecoloring();

  if (VerifyEnabled)
    MF->verify(this, "Before post optimization");
  postOptimization();
  reportStats();

  releaseMemory();
  return true;
}