//===- InlineSpiller.cpp - Insert spills and restores inline --------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// The inline spiller modifies the machine function directly instead of
// inserting spills and restores in VirtRegMap.
//
//===----------------------------------------------------------------------===//

#include "SplitKit.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/MapVector.h"
#include "llvm/ADT/None.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/CodeGen/LiveInterval.h"
#include "llvm/CodeGen/LiveIntervalCalc.h"
#include "llvm/CodeGen/LiveIntervals.h"
#include "llvm/CodeGen/LiveRangeEdit.h"
#include "llvm/CodeGen/LiveStacks.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineBlockFrequencyInfo.h"
#include "llvm/CodeGen/MachineDominators.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineInstrBundle.h"
#include "llvm/CodeGen/MachineLoopInfo.h"
#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/SlotIndexes.h"
#include "llvm/CodeGen/Spiller.h"
#include "llvm/CodeGen/StackMaps.h"
#include "llvm/CodeGen/TargetInstrInfo.h"
#include "llvm/CodeGen/TargetOpcodes.h"
#include "llvm/CodeGen/TargetRegisterInfo.h"
#include "llvm/CodeGen/TargetSubtargetInfo.h"
#include "llvm/CodeGen/VirtRegMap.h"
#include "llvm/Config/llvm-config.h"
#include "llvm/Support/BlockFrequency.h"
#include "llvm/Support/BranchProbability.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
#include <cassert>
#include <iterator>
#include <tuple>
#include <utility>
#include <vector>

using namespace llvm;

#define DEBUG_TYPE "regalloc"

STATISTIC(NumSpilledRanges, "Number of spilled live ranges");
STATISTIC(NumSnippets, "Number of spilled snippets");
STATISTIC(NumSpills, "Number of spills inserted");
STATISTIC(NumSpillsRemoved, "Number of spills removed");
STATISTIC(NumReloads, "Number of reloads inserted");
STATISTIC(NumReloadsRemoved, "Number of reloads removed");
STATISTIC(NumFolded, "Number of folded stack accesses");
STATISTIC(NumFoldedLoads, "Number of folded loads");
STATISTIC(NumRemats, "Number of rematerialized defs for spilling");

static cl::opt<bool> DisableHoisting("disable-spill-hoist", cl::Hidden,
                                     cl::desc("Disable inline spill hoisting"));
static cl::opt<bool>
RestrictStatepointRemat("restrict-statepoint-remat",
                        cl::init(false), cl::Hidden,
                        cl::desc("Restrict remat for statepoint operands"));

namespace {

class HoistSpillHelper : private LiveRangeEdit::Delegate {
  MachineFunction &MF;
  LiveIntervals &LIS;
  LiveStacks &LSS;
  AliasAnalysis *AA;
  MachineDominatorTree &MDT;
  MachineLoopInfo &Loops;
  VirtRegMap &VRM;
  MachineRegisterInfo &MRI;
  const TargetInstrInfo &TII;
  const TargetRegisterInfo &TRI;
  const MachineBlockFrequencyInfo &MBFI;

  InsertPointAnalysis IPA;

  // Map from StackSlot to the LiveInterval of the original register.
  // Note the LiveInterval of the original register may have been deleted
  // after it is spilled. We keep a copy here to track the range where
  // spills can be moved.
  DenseMap<int, std::unique_ptr<LiveInterval>> StackSlotToOrigLI;

  // Map from a pair of (StackSlot, Original VNI) to the set of spills which
  // have the same stack slot and have equal values defined by Original VNI.
  // These spills are mergeable and are hoist candidates.
  using MergeableSpillsMap =
      MapVector<std::pair<int, VNInfo *>, SmallPtrSet<MachineInstr *, 16>>;
  MergeableSpillsMap MergeableSpills;

  /// This is the map from original register to a set containing all its
  /// siblings. To hoist a spill to another BB, we need to find out a live
  /// sibling there and use it as the source of the new spill.
  DenseMap<Register, SmallSetVector<Register, 16>> Virt2SiblingsMap;

  bool isSpillCandBB(LiveInterval &OrigLI, VNInfo &OrigVNI,
                     MachineBasicBlock &BB, Register &LiveReg);

  void rmRedundantSpills(
      SmallPtrSet<MachineInstr *, 16> &Spills,
      SmallVectorImpl<MachineInstr *> &SpillsToRm,
      DenseMap<MachineDomTreeNode *, MachineInstr *> &SpillBBToSpill);

  void getVisitOrders(
      MachineBasicBlock *Root, SmallPtrSet<MachineInstr *, 16> &Spills,
      SmallVectorImpl<MachineDomTreeNode *> &Orders,
      SmallVectorImpl<MachineInstr *> &SpillsToRm,
      DenseMap<MachineDomTreeNode *, unsigned> &SpillsToKeep,
      DenseMap<MachineDomTreeNode *, MachineInstr *> &SpillBBToSpill);

  void runHoistSpills(LiveInterval &OrigLI, VNInfo &OrigVNI,
                      SmallPtrSet<MachineInstr *, 16> &Spills,
                      SmallVectorImpl<MachineInstr *> &SpillsToRm,
                      DenseMap<MachineBasicBlock *, unsigned> &SpillsToIns);

public:
  HoistSpillHelper(MachineFunctionPass &pass, MachineFunction &mf,
                   VirtRegMap &vrm)
      : MF(mf), LIS(pass.getAnalysis<LiveIntervals>()),
        LSS(pass.getAnalysis<LiveStacks>()),
        AA(&pass.getAnalysis<AAResultsWrapperPass>().getAAResults()),
        MDT(pass.getAnalysis<MachineDominatorTree>()),
        Loops(pass.getAnalysis<MachineLoopInfo>()), VRM(vrm),
        MRI(mf.getRegInfo()), TII(*mf.getSubtarget().getInstrInfo()),
        TRI(*mf.getSubtarget().getRegisterInfo()),
        MBFI(pass.getAnalysis<MachineBlockFrequencyInfo>()),
        IPA(LIS, mf.getNumBlockIDs()) {}

  void addToMergeableSpills(MachineInstr &Spill, int StackSlot,
                            unsigned Original);
  bool rmFromMergeableSpills(MachineInstr &Spill, int StackSlot);
  void hoistAllSpills();
  void LRE_DidCloneVirtReg(Register, Register) override;
};

class InlineSpiller : public Spiller {
  MachineFunction &MF;
  LiveIntervals &LIS;
  LiveStacks &LSS;
  AliasAnalysis *AA;
  MachineDominatorTree &MDT;
  MachineLoopInfo &Loops;
  VirtRegMap &VRM;
  MachineRegisterInfo &MRI;
  const TargetInstrInfo &TII;
  const TargetRegisterInfo &TRI;
  const MachineBlockFrequencyInfo &MBFI;

  // Variables that are valid during spill(), but used by multiple methods.
  LiveRangeEdit *Edit;
  LiveInterval *StackInt;
  int StackSlot;
  unsigned Original;

  // All registers to spill to StackSlot, including the main register.
  SmallVector<Register, 8> RegsToSpill;

  // All COPY instructions to/from snippets.
  // They are ignored since both operands refer to the same stack slot.
  SmallPtrSet<MachineInstr*, 8> SnippetCopies;

  // Values that failed to remat at some point.
  SmallPtrSet<VNInfo*, 8> UsedValues;

  // Dead defs generated during spilling.
  SmallVector<MachineInstr*, 8> DeadDefs;

  // Object that records spill information and does the hoisting.
  HoistSpillHelper HSpiller;

  ~InlineSpiller() override = default;

public:
  InlineSpiller(MachineFunctionPass &pass, MachineFunction &mf, VirtRegMap &vrm)
      : MF(mf), LIS(pass.getAnalysis<LiveIntervals>()),
        LSS(pass.getAnalysis<LiveStacks>()),
        AA(&pass.getAnalysis<AAResultsWrapperPass>().getAAResults()),
        MDT(pass.getAnalysis<MachineDominatorTree>()),
        Loops(pass.getAnalysis<MachineLoopInfo>()), VRM(vrm),
        MRI(mf.getRegInfo()), TII(*mf.getSubtarget().getInstrInfo()),
        TRI(*mf.getSubtarget().getRegisterInfo()),
        MBFI(pass.getAnalysis<MachineBlockFrequencyInfo>()),
        HSpiller(pass, mf, vrm) {}

  void spill(LiveRangeEdit &) override;
  void postOptimization() override;

private:
  bool isSnippet(const LiveInterval &SnipLI);
  void collectRegsToSpill();

  bool isRegToSpill(Register Reg) { return is_contained(RegsToSpill, Reg); }

  bool isSibling(Register Reg);
  bool hoistSpillInsideBB(LiveInterval &SpillLI, MachineInstr &CopyMI);
  void eliminateRedundantSpills(LiveInterval &LI, VNInfo *VNI);

  void markValueUsed(LiveInterval*, VNInfo*);
  bool canGuaranteeAssignmentAfterRemat(Register VReg, MachineInstr &MI);
  bool reMaterializeFor(LiveInterval &, MachineInstr &MI);
  void reMaterializeAll();

  bool coalesceStackAccess(MachineInstr *MI, Register Reg);
  bool foldMemoryOperand(ArrayRef<std::pair<MachineInstr *, unsigned>>,
                         MachineInstr *LoadMI = nullptr);
  void insertReload(Register VReg, SlotIndex, MachineBasicBlock::iterator MI);
  void insertSpill(Register VReg, bool isKill, MachineBasicBlock::iterator MI);

  void spillAroundUses(Register Reg);
  void spillAll();
};

} // end anonymous namespace

Spiller::~Spiller() = default;

void Spiller::anchor() {}

Spiller *llvm::createInlineSpiller(MachineFunctionPass &pass,
                                   MachineFunction &mf,
                                   VirtRegMap &vrm) {
  return new InlineSpiller(pass, mf, vrm);
}

//===----------------------------------------------------------------------===//
// Snippets
//===----------------------------------------------------------------------===//

// When spilling a virtual register, we also spill any snippets it is connected
// to. The snippets are small live ranges that only have a single real use,
// leftovers from live range splitting. Spilling them enables memory operand
// folding or tightens the live range around the single use.
//
// This minimizes register pressure and maximizes the store-to-load distance for
// spill slots which can be important in tight loops.

/// isFullCopyOf - If MI is a COPY to or from Reg, return the other register,
/// otherwise return an invalid register.
static Register isFullCopyOf(const MachineInstr &MI, Register Reg) {
  if (!MI.isFullCopy())
    return Register();
  if (MI.getOperand(0).getReg() == Reg)
    return MI.getOperand(1).getReg();
  if (MI.getOperand(1).getReg() == Reg)
    return MI.getOperand(0).getReg();
  return Register();
}

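/// Make sure LiveIntervals has an interval for every virtual register defined
/// by MI. LiveIntervals::getInterval() computes a missing interval on demand,
/// which is the side effect we rely on after inserting new spill code.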
static void getVDefInterval(const MachineInstr &MI, LiveIntervals &LIS) {
  for (unsigned I = 0, E = MI.getNumOperands(); I != E; ++I) {
    const MachineOperand &MO = MI.getOperand(I);
    if (MO.isReg() && MO.isDef() && Register::isVirtualRegister(MO.getReg()))
      LIS.getInterval(MO.getReg());
  }
}

/// isSnippet - Identify if a live interval is a snippet that should be spilled.
/// It is assumed that SnipLI is a virtual register with the same original as
/// Edit->getReg().
bool InlineSpiller::isSnippet(const LiveInterval &SnipLI) {
  Register Reg = Edit->getReg();

  // A snippet is a tiny live range with only a single instruction using it
  // besides copies to/from Reg or spills/fills. We accept:
  //
  //   %snip = COPY %Reg / FILL fi#
  //   %snip = USE %snip
  //   %Reg = COPY %snip / SPILL %snip, fi#
  //
  if (SnipLI.getNumValNums() > 2 || !LIS.intervalIsInOneMBB(SnipLI))
    return false;

  MachineInstr *UseMI = nullptr;

  // Check that all uses satisfy our criteria.
  for (MachineRegisterInfo::reg_instr_nodbg_iterator
           RI = MRI.reg_instr_nodbg_begin(SnipLI.reg()),
           E = MRI.reg_instr_nodbg_end();
       RI != E;) {
    MachineInstr &MI = *RI++;

    // Allow copies to/from Reg.
    if (isFullCopyOf(MI, Reg))
      continue;

    // Allow stack slot loads.
    int FI;
    if (SnipLI.reg() == TII.isLoadFromStackSlot(MI, FI) && FI == StackSlot)
      continue;

    // Allow stack slot stores.
    if (SnipLI.reg() == TII.isStoreToStackSlot(MI, FI) && FI == StackSlot)
      continue;

    // Allow a single additional instruction.
    if (UseMI && &MI != UseMI)
      return false;
    UseMI = &MI;
  }
  return true;
}

/// collectRegsToSpill - Collect live range snippets that only have a single
/// real use.
void InlineSpiller::collectRegsToSpill() {
  Register Reg = Edit->getReg();

  // Main register always spills.
  RegsToSpill.assign(1, Reg);
  SnippetCopies.clear();

  // Snippets all have the same original, so there can't be any snippets for
  // an original register.
  if (Original == Reg)
    return;

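  // Scan all instructions using Reg for full copies to/from a sibling
  // register; every sibling whose live range qualifies as a snippet is
  // spilled along with Reg.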
  for (MachineRegisterInfo::reg_instr_iterator
           RI = MRI.reg_instr_begin(Reg), E = MRI.reg_instr_end(); RI != E; ) {
    MachineInstr &MI = *RI++;
    Register SnipReg = isFullCopyOf(MI, Reg);
    if (!isSibling(SnipReg))
      continue;
    LiveInterval &SnipLI = LIS.getInterval(SnipReg);
    if (!isSnippet(SnipLI))
      continue;
    SnippetCopies.insert(&MI);
    if (isRegToSpill(SnipReg))
      continue;
    RegsToSpill.push_back(SnipReg);
    LLVM_DEBUG(dbgs() << "\talso spill snippet " << SnipLI << '\n');
    ++NumSnippets;
  }
}

bool InlineSpiller::isSibling(Register Reg) {
  return Reg.isVirtual() && VRM.getOriginal(Reg) == Original;
}

/// It is beneficial to spill to an earlier place in the same BB in the
/// following case:
/// There is an alternative def earlier in the same MBB.
/// Hoist the spill as far as possible in SpillMBB. This can ease
/// register pressure:
///
///   x = def
///   y = use x
///   s = copy x
///
/// Hoisting the spill of s to immediately after the def removes the
/// interference between x and y:
///
///   x = def
///   spill x
///   y = use killed x
///
/// This hoist only helps when the copy kills its source.
///
bool InlineSpiller::hoistSpillInsideBB(LiveInterval &SpillLI,
                                       MachineInstr &CopyMI) {
  SlotIndex Idx = LIS.getInstructionIndex(CopyMI);
#ifndef NDEBUG
  VNInfo *VNI = SpillLI.getVNInfoAt(Idx.getRegSlot());
  assert(VNI && VNI->def == Idx.getRegSlot() && "Not defined by copy");
#endif

  Register SrcReg = CopyMI.getOperand(1).getReg();
  LiveInterval &SrcLI = LIS.getInterval(SrcReg);
  VNInfo *SrcVNI = SrcLI.getVNInfoAt(Idx);
  LiveQueryResult SrcQ = SrcLI.Query(Idx);
  MachineBasicBlock *DefMBB = LIS.getMBBFromIndex(SrcVNI->def);
  if (DefMBB != CopyMI.getParent() || !SrcQ.isKill())
    return false;

  // Conservatively extend the stack slot range to the range of the original
  // value. We may be able to do better with stack slot coloring by being more
  // careful here.
  assert(StackInt && "No stack slot assigned yet.");
  LiveInterval &OrigLI = LIS.getInterval(Original);
  VNInfo *OrigVNI = OrigLI.getVNInfoAt(Idx);
  StackInt->MergeValueInAsValue(OrigLI, OrigVNI, StackInt->getValNumInfo(0));
  LLVM_DEBUG(dbgs() << "\tmerged orig valno " << OrigVNI->id << ": "
                    << *StackInt << '\n');

  // We are going to spill SrcVNI immediately after its def, so clear out
  // any later spills of the same value.
  eliminateRedundantSpills(SrcLI, SrcVNI);

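  // Pick the insertion point directly after the def: past any PHIs and labels
  // when SrcVNI is a PHI-defined value, otherwise right after the defining
  // instruction.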
  MachineBasicBlock *MBB = LIS.getMBBFromIndex(SrcVNI->def);
  MachineBasicBlock::iterator MII;
  if (SrcVNI->isPHIDef())
    MII = MBB->SkipPHIsLabelsAndDebug(MBB->begin());
  else {
    MachineInstr *DefMI = LIS.getInstructionFromIndex(SrcVNI->def);
    assert(DefMI && "Defining instruction disappeared");
    MII = DefMI;
    ++MII;
  }
  MachineInstrSpan MIS(MII, MBB);
  // Insert spill without kill flag immediately after def.
  TII.storeRegToStackSlot(*MBB, MII, SrcReg, false, StackSlot,
                          MRI.getRegClass(SrcReg), &TRI);
  LIS.InsertMachineInstrRangeInMaps(MIS.begin(), MII);
  for (const MachineInstr &MI : make_range(MIS.begin(), MII))
    getVDefInterval(MI, LIS);
  --MII; // Point to store instruction.
  LLVM_DEBUG(dbgs() << "\thoisted: " << SrcVNI->def << '\t' << *MII);

  // If only one store instruction is required for the spill, add it to the
  // mergeable list. On X86 AMX, two instructions are required to store, so
  // we disable merging for that case.
  if (MIS.begin() == MII)
    HSpiller.addToMergeableSpills(*MII, StackSlot, Original);
  ++NumSpills;
  return true;
}

/// eliminateRedundantSpills - SLI:VNI is known to be on the stack. Remove any
/// redundant spills of this value in SLI.reg and sibling copies.
void InlineSpiller::eliminateRedundantSpills(LiveInterval &SLI, VNInfo *VNI) {
  assert(VNI && "Missing value");
  SmallVector<std::pair<LiveInterval*, VNInfo*>, 8> WorkList;
  WorkList.push_back(std::make_pair(&SLI, VNI));
  assert(StackInt && "No stack slot assigned yet.");

  do {
    LiveInterval *LI;
    std::tie(LI, VNI) = WorkList.pop_back_val();
    Register Reg = LI->reg();
    LLVM_DEBUG(dbgs() << "Checking redundant spills for " << VNI->id << '@'
                      << VNI->def << " in " << *LI << '\n');

    // Regs to spill are taken care of.
    if (isRegToSpill(Reg))
      continue;

    // Add all of VNI's live range to StackInt.
    StackInt->MergeValueInAsValue(*LI, VNI, StackInt->getValNumInfo(0));
    LLVM_DEBUG(dbgs() << "Merged to stack int: " << *StackInt << '\n');

    // Find all spills and copies of VNI.
    for (MachineRegisterInfo::use_instr_nodbg_iterator
             UI = MRI.use_instr_nodbg_begin(Reg), E = MRI.use_instr_nodbg_end();
         UI != E; ) {
      MachineInstr &MI = *UI++;
      if (!MI.isCopy() && !MI.mayStore())
        continue;
      SlotIndex Idx = LIS.getInstructionIndex(MI);
      if (LI->getVNInfoAt(Idx) != VNI)
        continue;

      // Follow sibling copies down the dominator tree.
      if (Register DstReg = isFullCopyOf(MI, Reg)) {
        if (isSibling(DstReg)) {
          LiveInterval &DstLI = LIS.getInterval(DstReg);
          VNInfo *DstVNI = DstLI.getVNInfoAt(Idx.getRegSlot());
          assert(DstVNI && "Missing defined value");
          assert(DstVNI->def == Idx.getRegSlot() && "Wrong copy def slot");
          WorkList.push_back(std::make_pair(&DstLI, DstVNI));
        }
        continue;
      }

      // Erase spills.
      int FI;
      if (Reg == TII.isStoreToStackSlot(MI, FI) && FI == StackSlot) {
        LLVM_DEBUG(dbgs() << "Redundant spill " << Idx << '\t' << MI);
        // eliminateDeadDefs won't normally remove stores, so switch opcode.
        MI.setDesc(TII.get(TargetOpcode::KILL));
        DeadDefs.push_back(&MI);
        ++NumSpillsRemoved;
        if (HSpiller.rmFromMergeableSpills(MI, StackSlot))
          --NumSpills;
      }
    }
  } while (!WorkList.empty());
}

//===----------------------------------------------------------------------===//
// Rematerialization
//===----------------------------------------------------------------------===//

/// markValueUsed - Remember that VNI failed to rematerialize, so its defining
/// instruction cannot be eliminated. See through snippet copies.
void InlineSpiller::markValueUsed(LiveInterval *LI, VNInfo *VNI) {
  SmallVector<std::pair<LiveInterval*, VNInfo*>, 8> WorkList;
  WorkList.push_back(std::make_pair(LI, VNI));
  do {
    std::tie(LI, VNI) = WorkList.pop_back_val();
    if (!UsedValues.insert(VNI).second)
      continue;

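    // A PHI-defined value depends on the values arriving from all
    // predecessors, so mark those as used too; their defining instructions
    // must not be deleted either.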
    if (VNI->isPHIDef()) {
      MachineBasicBlock *MBB = LIS.getMBBFromIndex(VNI->def);
      for (MachineBasicBlock *P : MBB->predecessors()) {
        VNInfo *PVNI = LI->getVNInfoBefore(LIS.getMBBEndIdx(P));
        if (PVNI)
          WorkList.push_back(std::make_pair(LI, PVNI));
      }
      continue;
    }

    // Follow snippet copies.
    MachineInstr *MI = LIS.getInstructionFromIndex(VNI->def);
    if (!SnippetCopies.count(MI))
      continue;
    LiveInterval &SnipLI = LIS.getInterval(MI->getOperand(1).getReg());
    assert(isRegToSpill(SnipLI.reg()) && "Unexpected register in copy");
    VNInfo *SnipVNI = SnipLI.getVNInfoAt(VNI->def.getRegSlot(true));
    assert(SnipVNI && "Snippet undefined before copy");
    WorkList.push_back(std::make_pair(&SnipLI, SnipVNI));
  } while (!WorkList.empty());
}

bool InlineSpiller::canGuaranteeAssignmentAfterRemat(Register VReg,
                                                     MachineInstr &MI) {
  if (!RestrictStatepointRemat)
    return true;
  // Here's a quick explanation of the problem we're trying to handle here:
  // * There are some pseudo instructions with more vreg uses than there are
  //   physical registers on the machine.
  // * This is normally handled by spilling the vreg, and folding the reload
  //   into the user instruction. (Thus decreasing the number of used vregs
  //   until the remainder can be assigned to physregs.)
  // * However, since we may try to spill vregs in any order, we can end up
  //   trying to spill each operand to the instruction, and then rematting it
  //   instead. When that happens, the new live intervals (for the remats) are
  //   expected to be trivially assignable (i.e. RS_Done). However, since we
  //   may have more remats than physregs, we're guaranteed to fail to assign
  //   one.
  // At the moment, we only handle this for STATEPOINTs since they're the only
  // pseudo op where we've seen this. If we start seeing other instructions
  // with the same problem, we need to revisit this.
  if (MI.getOpcode() != TargetOpcode::STATEPOINT)
    return true;
  // For STATEPOINTs we allow re-materialization for fixed arguments only,
  // hoping that the number of physical registers is enough to cover all the
  // fixed arguments. If that is not true, we need to revisit it.
  for (unsigned Idx = StatepointOpers(&MI).getVarIdx(),
                EndIdx = MI.getNumOperands();
       Idx < EndIdx; ++Idx) {
    MachineOperand &MO = MI.getOperand(Idx);
    if (MO.isReg() && MO.getReg() == VReg)
      return false;
  }
  return true;
}

/// reMaterializeFor - Attempt to rematerialize before MI instead of reloading.
bool InlineSpiller::reMaterializeFor(LiveInterval &VirtReg, MachineInstr &MI) {
  // Analyze instruction
  SmallVector<std::pair<MachineInstr *, unsigned>, 8> Ops;
  VirtRegInfo RI = AnalyzeVirtRegInBundle(MI, VirtReg.reg(), &Ops);

  if (!RI.Reads)
    return false;

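  // MI reads VirtReg no later than its early-clobber slot, so query the
  // parent value that is live there.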
  SlotIndex UseIdx = LIS.getInstructionIndex(MI).getRegSlot(true);
  VNInfo *ParentVNI = VirtReg.getVNInfoAt(UseIdx.getBaseIndex());

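  // No value is live here: MI reads an undefined value, so there is nothing
  // to reload or rematerialize. Just flag the operands as <undef>.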
  if (!ParentVNI) {
    LLVM_DEBUG(dbgs() << "\tadding <undef> flags: ");
    for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) {
      MachineOperand &MO = MI.getOperand(i);
      if (MO.isReg() && MO.isUse() && MO.getReg() == VirtReg.reg())
        MO.setIsUndef();
    }
    LLVM_DEBUG(dbgs() << UseIdx << '\t' << MI);
    return true;
  }

  if (SnippetCopies.count(&MI))
    return false;

  LiveInterval &OrigLI = LIS.getInterval(Original);
  VNInfo *OrigVNI = OrigLI.getVNInfoAt(UseIdx);
  LiveRangeEdit::Remat RM(ParentVNI);
  RM.OrigMI = LIS.getInstructionFromIndex(OrigVNI->def);

  if (!Edit->canRematerializeAt(RM, OrigVNI, UseIdx, false)) {
    markValueUsed(&VirtReg, ParentVNI);
    LLVM_DEBUG(dbgs() << "\tcannot remat for " << UseIdx << '\t' << MI);
    return false;
  }

  // If the instruction also writes VirtReg.reg, it had better not require the
  // same register for uses and defs.
  if (RI.Tied) {
    markValueUsed(&VirtReg, ParentVNI);
    LLVM_DEBUG(dbgs() << "\tcannot remat tied reg: " << UseIdx << '\t' << MI);
    return false;
  }

  // Before rematerializing into a register for a single instruction, try to
  // fold a load into the instruction. That avoids allocating a new register.
  if (RM.OrigMI->canFoldAsLoad() &&
      foldMemoryOperand(Ops, RM.OrigMI)) {
    Edit->markRematerialized(RM.ParentVNI);
    ++NumFoldedLoads;
    return true;
  }

  // If we can't guarantee that we'll be able to actually assign the new vreg,
  // we can't remat.
  if (!canGuaranteeAssignmentAfterRemat(VirtReg.reg(), MI)) {
    markValueUsed(&VirtReg, ParentVNI);
    LLVM_DEBUG(dbgs() << "\tcannot remat for " << UseIdx << '\t' << MI);
    return false;
  }

  // Allocate a new register for the remat.
  Register NewVReg = Edit->createFrom(Original);

  // Finally we can rematerialize OrigMI before MI.
  SlotIndex DefIdx =
      Edit->rematerializeAt(*MI.getParent(), MI, NewVReg, RM, TRI);

  // We take the DebugLoc from MI, since OrigMI may be attributed to a
  // different source location.
  auto *NewMI = LIS.getInstructionFromIndex(DefIdx);
  NewMI->setDebugLoc(MI.getDebugLoc());

  (void)DefIdx;
  LLVM_DEBUG(dbgs() << "\tremat: " << DefIdx << '\t'
                    << *LIS.getInstructionFromIndex(DefIdx));

  // Replace operands
  for (const auto &OpPair : Ops) {
    MachineOperand &MO = OpPair.first->getOperand(OpPair.second);
    if (MO.isReg() && MO.isUse() && MO.getReg() == VirtReg.reg()) {
      MO.setReg(NewVReg);
      MO.setIsKill();
    }
  }
  LLVM_DEBUG(dbgs() << "\t " << UseIdx << '\t' << MI << '\n');

  ++NumRemats;
  return true;
}

/// reMaterializeAll - Try to rematerialize as many uses as possible,
/// and trim the live ranges after.
void InlineSpiller::reMaterializeAll() {
  if (!Edit->anyRematerializable(AA))
    return;

  UsedValues.clear();

  // Try to remat before all uses of snippets.
  bool anyRemat = false;
  for (Register Reg : RegsToSpill) {
    LiveInterval &LI = LIS.getInterval(Reg);
    for (MachineRegisterInfo::reg_bundle_iterator
             RegI = MRI.reg_bundle_begin(Reg), E = MRI.reg_bundle_end();
         RegI != E; ) {
      MachineInstr &MI = *RegI++;

      // Debug values are not allowed to affect codegen.
      if (MI.isDebugValue())
        continue;

      assert(!MI.isDebugInstr() && "Did not expect to find a use in debug "
             "instruction that isn't a DBG_VALUE");

      anyRemat |= reMaterializeFor(LI, MI);
    }
  }
  if (!anyRemat)
    return;

  // Remove any values that were completely rematted.
  for (Register Reg : RegsToSpill) {
    LiveInterval &LI = LIS.getInterval(Reg);
    for (LiveInterval::vni_iterator I = LI.vni_begin(), E = LI.vni_end();
         I != E; ++I) {
      VNInfo *VNI = *I;
      if (VNI->isUnused() || VNI->isPHIDef() || UsedValues.count(VNI))
        continue;
      MachineInstr *MI = LIS.getInstructionFromIndex(VNI->def);
      MI->addRegisterDead(Reg, &TRI);
      if (!MI->allDefsAreDead())
        continue;
      LLVM_DEBUG(dbgs() << "All defs dead: " << *MI);
      DeadDefs.push_back(MI);
    }
  }

  // Eliminate dead code after remat. Note that some snippet copies may be
  // deleted here.
  if (DeadDefs.empty())
    return;
  LLVM_DEBUG(dbgs() << "Remat created " << DeadDefs.size() << " dead defs.\n");
  Edit->eliminateDeadDefs(DeadDefs, RegsToSpill, AA);

  // LiveRangeEdit::eliminateDeadDef is used to remove dead define instructions
  // after rematerialization. To remove a VNI for a vreg from its LiveInterval,
  // LiveIntervals::removeVRegDefAt is used. However, after non-PHI VNIs are all
  // removed, PHI VNIs are still left in the LiveInterval.
  // So to get rid of unused regs, we need to check whether they have non-dbg
  // references instead of whether they have non-empty intervals.
  unsigned ResultPos = 0;
  for (Register Reg : RegsToSpill) {
    if (MRI.reg_nodbg_empty(Reg)) {
      Edit->eraseVirtReg(Reg);
      continue;
    }

    assert(LIS.hasInterval(Reg) &&
           (!LIS.getInterval(Reg).empty() || !MRI.reg_nodbg_empty(Reg)) &&
           "Empty and not used live-range?!");

    RegsToSpill[ResultPos++] = Reg;
  }
  RegsToSpill.erase(RegsToSpill.begin() + ResultPos, RegsToSpill.end());
  LLVM_DEBUG(dbgs() << RegsToSpill.size()
                    << " registers to spill after remat.\n");
}

//===----------------------------------------------------------------------===//
// Spilling
//===----------------------------------------------------------------------===//

/// If MI is a load or store of StackSlot, it can be removed.
bool InlineSpiller::coalesceStackAccess(MachineInstr *MI, Register Reg) {
  int FI = 0;
  Register InstrReg = TII.isLoadFromStackSlot(*MI, FI);
  bool IsLoad = InstrReg;
  if (!IsLoad)
    InstrReg = TII.isStoreToStackSlot(*MI, FI);

  // We have a stack access. Is it the right register and slot?
  if (InstrReg != Reg || FI != StackSlot)
    return false;

  if (!IsLoad)
    HSpiller.rmFromMergeableSpills(*MI, StackSlot);

  LLVM_DEBUG(dbgs() << "Coalescing stack access: " << *MI);
  LIS.RemoveMachineInstrFromMaps(*MI);
  MI->eraseFromParent();

  if (IsLoad) {
    ++NumReloadsRemoved;
    --NumReloads;
  } else {
    ++NumSpillsRemoved;
    --NumSpills;
  }

  return true;
}

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
LLVM_DUMP_METHOD
// Dump the range of instructions from B to E with their slot indexes.
static void dumpMachineInstrRangeWithSlotIndex(MachineBasicBlock::iterator B,
                                               MachineBasicBlock::iterator E,
                                               LiveIntervals const &LIS,
                                               const char *const header,
                                               Register VReg = Register()) {
  char NextLine = '\n';
  char SlotIndent = '\t';

  if (std::next(B) == E) {
    NextLine = ' ';
    SlotIndent = ' ';
  }

  dbgs() << '\t' << header << ": " << NextLine;

  for (MachineBasicBlock::iterator I = B; I != E; ++I) {
    SlotIndex Idx = LIS.getInstructionIndex(*I).getRegSlot();

    // If a register was passed in and this instruction has it as a
    // destination that is marked as an early clobber, print the
    // early-clobber slot index.
    if (VReg) {
      MachineOperand *MO = I->findRegisterDefOperand(VReg);
      if (MO && MO->isEarlyClobber())
        Idx = Idx.getRegSlot(true);
    }

    dbgs() << SlotIndent << Idx << '\t' << *I;
  }
}
#endif

/// foldMemoryOperand - Try folding stack slot references in Ops into their
/// instructions.
///
/// @param Ops    Operand indices from AnalyzeVirtRegInBundle().
/// @param LoadMI Load instruction to use instead of stack slot when non-null.
/// @return       True on success.
bool InlineSpiller::
foldMemoryOperand(ArrayRef<std::pair<MachineInstr *, unsigned>> Ops,
                  MachineInstr *LoadMI) {
  if (Ops.empty())
    return false;
  // Don't attempt folding in bundles.
  MachineInstr *MI = Ops.front().first;
  if (Ops.back().first != MI || MI->isBundled())
    return false;

  bool WasCopy = MI->isCopy();
  Register ImpReg;

  // TII::foldMemoryOperand will do what we need here for statepoint
  // (fold load into use and remove corresponding def). We will replace
  // uses of removed def with loads (spillAroundUses).
  // For that to work we need to untie def and use to pass it through
  // foldMemoryOperand and signal foldPatchpoint that it is allowed to
  // fold them.
  bool UntieRegs = MI->getOpcode() == TargetOpcode::STATEPOINT;

  // Spill subregs if the target allows it.
  // We always want to spill subregs for stackmap/patchpoint pseudos.
  bool SpillSubRegs = TII.isSubregFoldable() ||
                      MI->getOpcode() == TargetOpcode::STATEPOINT ||
                      MI->getOpcode() == TargetOpcode::PATCHPOINT ||
                      MI->getOpcode() == TargetOpcode::STACKMAP;

  // TargetInstrInfo::foldMemoryOperand only expects explicit, non-tied
  // operands.
  SmallVector<unsigned, 8> FoldOps;
  for (const auto &OpPair : Ops) {
    unsigned Idx = OpPair.second;
    assert(MI == OpPair.first && "Instruction conflict during operand folding");
    MachineOperand &MO = MI->getOperand(Idx);
    if (MO.isImplicit()) {
      ImpReg = MO.getReg();
      continue;
    }

    if (!SpillSubRegs && MO.getSubReg())
      return false;
    // We cannot fold a load instruction into a def.
    if (LoadMI && MO.isDef())
      return false;
    // Tied use operands should not be passed to foldMemoryOperand.
    if (UntieRegs || !MI->isRegTiedToDefOperand(Idx))
      FoldOps.push_back(Idx);
  }

  // If we only have implicit uses, we won't be able to fold that.
  // Moreover, TargetInstrInfo::foldMemoryOperand will assert if we try!
  if (FoldOps.empty())
    return false;

  MachineInstrSpan MIS(MI, MI->getParent());

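  // Record the tied operand pairs before untying them, so they can be re-tied
  // if folding fails.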
  SmallVector<std::pair<unsigned, unsigned> > TiedOps;
  if (UntieRegs)
    for (unsigned Idx : FoldOps) {
      MachineOperand &MO = MI->getOperand(Idx);
      if (!MO.isTied())
        continue;
      unsigned Tied = MI->findTiedOperandIdx(Idx);
      if (MO.isUse())
        TiedOps.emplace_back(Tied, Idx);
      else {
        assert(MO.isDef() && "Tied to not use and def?");
        TiedOps.emplace_back(Idx, Tied);
      }
      MI->untieRegOperand(Idx);
    }

  MachineInstr *FoldMI =
      LoadMI ? TII.foldMemoryOperand(*MI, FoldOps, *LoadMI, &LIS)
             : TII.foldMemoryOperand(*MI, FoldOps, StackSlot, &LIS, &VRM);
  if (!FoldMI) {
    // Re-tie operands.
    for (auto Tied : TiedOps)
      MI->tieOperands(Tied.first, Tied.second);
    return false;
  }

  // Remove LIS for any dead defs in the original MI not in FoldMI.
  for (MIBundleOperands MO(*MI); MO.isValid(); ++MO) {
    if (!MO->isReg())
      continue;
    Register Reg = MO->getReg();
    if (!Reg || Register::isVirtualRegister(Reg) || MRI.isReserved(Reg)) {
      continue;
    }
    // Skip non-Defs, including undef uses and internal reads.
    if (MO->isUse())
      continue;
    PhysRegInfo RI = AnalyzePhysRegInBundle(*FoldMI, Reg, &TRI);
    if (RI.FullyDefined)
      continue;
    // FoldMI does not define this physreg. Remove the LI segment.
    assert(MO->isDead() && "Cannot fold physreg def");
    SlotIndex Idx = LIS.getInstructionIndex(*MI).getRegSlot();
    LIS.removePhysRegDefAt(Reg.asMCReg(), Idx);
  }

  int FI;
  if (TII.isStoreToStackSlot(*MI, FI) &&
      HSpiller.rmFromMergeableSpills(*MI, FI))
    --NumSpills;
  LIS.ReplaceMachineInstrInMaps(*MI, *FoldMI);
  // Update the call site info.
  if (MI->isCandidateForCallSiteEntry())
    MI->getMF()->moveCallSiteInfo(MI, FoldMI);
  MI->eraseFromParent();

  // Insert any new instructions other than FoldMI into the LIS maps.
  assert(!MIS.empty() && "Unexpected empty span of instructions!");
  for (MachineInstr &MI : MIS)
    if (&MI != FoldMI)
      LIS.InsertMachineInstrInMaps(MI);

  // TII.foldMemoryOperand may have left some implicit operands on the
  // instruction. Strip them.
  if (ImpReg)
    for (unsigned i = FoldMI->getNumOperands(); i; --i) {
      MachineOperand &MO = FoldMI->getOperand(i - 1);
      if (!MO.isReg() || !MO.isImplicit())
        break;
      if (MO.getReg() == ImpReg)
        FoldMI->RemoveOperand(i - 1);
    }

  LLVM_DEBUG(dumpMachineInstrRangeWithSlotIndex(MIS.begin(), MIS.end(), LIS,
                                                "folded"));

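  // Update statistics. For a folded COPY, operand index 0 is the copy's def,
  // so the fold produced a spill; any other folded operand is a use, so the
  // fold produced a reload.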
  if (!WasCopy)
    ++NumFolded;
  else if (Ops.front().second == 0) {
    ++NumSpills;
    // If only one store instruction is required for the spill, add it to the
    // mergeable list. On X86 AMX, two instructions are required to store, so
    // we disable merging for that case.
    if (std::distance(MIS.begin(), MIS.end()) <= 1)
      HSpiller.addToMergeableSpills(*FoldMI, StackSlot, Original);
  } else
    ++NumReloads;
  return true;
}

void InlineSpiller::insertReload(Register NewVReg,
                                 SlotIndex Idx,
                                 MachineBasicBlock::iterator MI) {
  MachineBasicBlock &MBB = *MI->getParent();

  MachineInstrSpan MIS(MI, &MBB);
  TII.loadRegFromStackSlot(MBB, MI, NewVReg, StackSlot,
                           MRI.getRegClass(NewVReg), &TRI);

  LIS.InsertMachineInstrRangeInMaps(MIS.begin(), MI);

  LLVM_DEBUG(dumpMachineInstrRangeWithSlotIndex(MIS.begin(), MI, LIS, "reload",
                                                NewVReg));
  ++NumReloads;
}

/// Check if \p Def fully defines a VReg with an undefined value.
/// If that's the case, that means the value of VReg is actually
/// not relevant.
static bool isRealSpill(const MachineInstr &Def) {
  if (!Def.isImplicitDef())
    return true;
  assert(Def.getNumOperands() == 1 &&
         "Implicit def with more than one definition");
  // We can say that the VReg defined by Def is undef, only if it is
  // fully defined by Def. Otherwise, some of the lanes may not be
  // undef and the value of the VReg matters.
  return Def.getOperand(0).getSubReg();
}

/// insertSpill - Insert a spill of NewVReg after MI.
void InlineSpiller::insertSpill(Register NewVReg, bool isKill,
                                MachineBasicBlock::iterator MI) {
  // Spills are not terminators, so inserting spills after terminators would
  // violate invariants in MachineVerifier.
  assert(!MI->isTerminator() && "Inserting a spill after a terminator");
  MachineBasicBlock &MBB = *MI->getParent();

  MachineInstrSpan MIS(MI, &MBB);
  MachineBasicBlock::iterator SpillBefore = std::next(MI);
  bool IsRealSpill = isRealSpill(*MI);

  if (IsRealSpill)
    TII.storeRegToStackSlot(MBB, SpillBefore, NewVReg, isKill, StackSlot,
                            MRI.getRegClass(NewVReg), &TRI);
  else
    // Don't spill undef value.
    // Anything works for undef, in particular keeping the memory
    // uninitialized is a viable option and it saves code size and
    // run time.
    BuildMI(MBB, SpillBefore, MI->getDebugLoc(), TII.get(TargetOpcode::KILL))
        .addReg(NewVReg, getKillRegState(isKill));

  MachineBasicBlock::iterator Spill = std::next(MI);
  LIS.InsertMachineInstrRangeInMaps(Spill, MIS.end());
  for (const MachineInstr &MI : make_range(Spill, MIS.end()))
    getVDefInterval(MI, LIS);

  LLVM_DEBUG(
      dumpMachineInstrRangeWithSlotIndex(Spill, MIS.end(), LIS, "spill"));
  ++NumSpills;
  // If only one store instruction is required for the spill, add it to the
  // mergeable list. On X86 AMX, two instructions are required to store, so
  // we disable merging for that case.
  if (IsRealSpill && std::distance(Spill, MIS.end()) <= 1)
    HSpiller.addToMergeableSpills(*Spill, StackSlot, Original);
}

/// spillAroundUses - insert spill code around each use of Reg.
void InlineSpiller::spillAroundUses(Register Reg) {
  LLVM_DEBUG(dbgs() << "spillAroundUses " << printReg(Reg) << '\n');
  LiveInterval &OldLI = LIS.getInterval(Reg);

  // Iterate over instructions using Reg.
  for (MachineRegisterInfo::reg_bundle_iterator
           RegI = MRI.reg_bundle_begin(Reg), E = MRI.reg_bundle_end();
       RegI != E; ) {
    MachineInstr *MI = &*(RegI++);

    // Debug values are not allowed to affect codegen.
    if (MI->isDebugValue()) {
      // Modify DBG_VALUE now that the value is in a spill slot.
      MachineBasicBlock *MBB = MI->getParent();
      LLVM_DEBUG(dbgs() << "Modifying debug info due to spill:\t" << *MI);
      buildDbgValueForSpill(*MBB, MI, *MI, StackSlot);
      MBB->erase(MI);
      continue;
    }

    assert(!MI->isDebugInstr() && "Did not expect to find a use in debug "
           "instruction that isn't a DBG_VALUE");

    // Ignore copies to/from snippets. We'll delete them.
    if (SnippetCopies.count(MI))
      continue;

    // Stack slot accesses may coalesce away.
    if (coalesceStackAccess(MI, Reg))
      continue;

    // Analyze instruction.
    SmallVector<std::pair<MachineInstr*, unsigned>, 8> Ops;
    VirtRegInfo RI = AnalyzeVirtRegInBundle(*MI, Reg, &Ops);

    // Find the slot index where this instruction reads and writes OldLI.
    // This is usually the def slot, except for tied early clobbers.
    SlotIndex Idx = LIS.getInstructionIndex(*MI).getRegSlot();
    if (VNInfo *VNI = OldLI.getVNInfoAt(Idx.getRegSlot(true)))
      if (SlotIndex::isSameInstr(Idx, VNI->def))
        Idx = VNI->def;

    // Check for a sibling copy.
    Register SibReg = isFullCopyOf(*MI, Reg);
    if (SibReg && isSibling(SibReg)) {
      // This may actually be a copy between snippets.
      if (isRegToSpill(SibReg)) {
        LLVM_DEBUG(dbgs() << "Found new snippet copy: " << *MI);
        SnippetCopies.insert(MI);
        continue;
      }
      if (RI.Writes) {
        if (hoistSpillInsideBB(OldLI, *MI)) {
          // This COPY is now dead, the value is already in the stack slot.
          MI->getOperand(0).setIsDead();
          DeadDefs.push_back(MI);
          continue;
        }
      } else {
        // This is a reload for a sib-reg copy. Drop spills downstream.
        LiveInterval &SibLI = LIS.getInterval(SibReg);
        eliminateRedundantSpills(SibLI, SibLI.getVNInfoAt(Idx));
        // The COPY will fold to a reload below.
      }
    }

    // Attempt to fold memory ops.
    if (foldMemoryOperand(Ops))
      continue;

    // Create a new virtual register for spill/fill.
    // FIXME: Infer regclass from instruction alone.
    Register NewVReg = Edit->createFrom(Reg);

    if (RI.Reads)
      insertReload(NewVReg, Idx, MI);

    // Rewrite instruction operands.
    bool hasLiveDef = false;
    for (const auto &OpPair : Ops) {
      MachineOperand &MO = OpPair.first->getOperand(OpPair.second);
      MO.setReg(NewVReg);
      if (MO.isUse()) {
        if (!OpPair.first->isRegTiedToDefOperand(OpPair.second))
          MO.setIsKill();
      } else {
        if (!MO.isDead())
          hasLiveDef = true;
      }
    }
    LLVM_DEBUG(dbgs() << "\trewrite: " << Idx << '\t' << *MI << '\n');

    // FIXME: Use a second vreg if instruction has no tied ops.
    if (RI.Writes)
      if (hasLiveDef)
        insertSpill(NewVReg, true, MI);
  }
}

/// spillAll - Spill all registers remaining after rematerialization.
void InlineSpiller::spillAll() {
  // Update LiveStacks now that we are committed to spilling.
  if (StackSlot == VirtRegMap::NO_STACK_SLOT) {
    StackSlot = VRM.assignVirt2StackSlot(Original);
    StackInt = &LSS.getOrCreateInterval(StackSlot, MRI.getRegClass(Original));
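    // A stack slot interval carries a single value number; everything stored
    // to the slot is treated as the same value.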
    StackInt->getNextValue(SlotIndex(), LSS.getVNInfoAllocator());
  } else
    StackInt = &LSS.getInterval(StackSlot);

  if (Original != Edit->getReg())
    VRM.assignVirt2StackSlot(Edit->getReg(), StackSlot);

  assert(StackInt->getNumValNums() == 1 && "Bad stack interval values");
  for (Register Reg : RegsToSpill)
    StackInt->MergeSegmentsInAsValue(LIS.getInterval(Reg),
                                     StackInt->getValNumInfo(0));
  LLVM_DEBUG(dbgs() << "Merged spilled regs: " << *StackInt << '\n');

  // Spill around uses of all RegsToSpill.
  for (Register Reg : RegsToSpill)
    spillAroundUses(Reg);

  // Hoisted spills may cause dead code.
  if (!DeadDefs.empty()) {
    LLVM_DEBUG(dbgs() << "Eliminating " << DeadDefs.size() << " dead defs\n");
    Edit->eliminateDeadDefs(DeadDefs, RegsToSpill, AA);
  }

  // Finally delete the SnippetCopies.
  for (Register Reg : RegsToSpill) {
    for (MachineRegisterInfo::reg_instr_iterator
             RI = MRI.reg_instr_begin(Reg), E = MRI.reg_instr_end();
         RI != E; ) {
      MachineInstr &MI = *(RI++);
      assert(SnippetCopies.count(&MI) && "Remaining use wasn't a snippet copy");
      // FIXME: Do this with a LiveRangeEdit callback.
      LIS.RemoveMachineInstrFromMaps(MI);
      MI.eraseFromParent();
    }
  }

  // Delete all spilled registers.
  for (Register Reg : RegsToSpill)
    Edit->eraseVirtReg(Reg);
}

void InlineSpiller::spill(LiveRangeEdit &edit) {
  ++NumSpilledRanges;
  Edit = &edit;
  assert(!Register::isStackSlot(edit.getReg()) &&
         "Trying to spill a stack slot.");
  // Share a stack slot among all descendants of Original.
  Original = VRM.getOriginal(edit.getReg());
  StackSlot = VRM.getStackSlot(Original);
  StackInt = nullptr;

  LLVM_DEBUG(dbgs() << "Inline spilling "
                    << TRI.getRegClassName(MRI.getRegClass(edit.getReg()))
                    << ':' << edit.getParent() << "\nFrom original "
                    << printReg(Original) << '\n');
  assert(edit.getParent().isSpillable() &&
         "Attempting to spill already spilled value.");
  assert(DeadDefs.empty() && "Previous spill didn't remove dead defs");

  collectRegsToSpill();
  reMaterializeAll();

  // Remat may handle everything.
  if (!RegsToSpill.empty())
    spillAll();

  Edit->calculateRegClassAndHint(MF, Loops, MBFI);
}

/// Optimizations after all the reg selections and spills are done.
void InlineSpiller::postOptimization() { HSpiller.hoistAllSpills(); }

/// When a spill is inserted, add the spill to MergeableSpills map.
void HoistSpillHelper::addToMergeableSpills(MachineInstr &Spill, int StackSlot,
                                            unsigned Original) {
  BumpPtrAllocator &Allocator = LIS.getVNInfoAllocator();
  LiveInterval &OrigLI = LIS.getInterval(Original);
  // Save a copy of the LiveInterval in StackSlotToOrigLI because the original
  // LiveInterval may be cleared after all its references are spilled.
  if (StackSlotToOrigLI.find(StackSlot) == StackSlotToOrigLI.end()) {
    auto LI = std::make_unique<LiveInterval>(OrigLI.reg(), OrigLI.weight());
    LI->assign(OrigLI, Allocator);
    StackSlotToOrigLI[StackSlot] = std::move(LI);
  }
  SlotIndex Idx = LIS.getInstructionIndex(Spill);
  VNInfo *OrigVNI = StackSlotToOrigLI[StackSlot]->getVNInfoAt(Idx.getRegSlot());
  std::pair<int, VNInfo *> MIdx = std::make_pair(StackSlot, OrigVNI);
  MergeableSpills[MIdx].insert(&Spill);
}

/// When a spill is removed, remove the spill from MergeableSpills map.
/// Return true if the spill is removed successfully.
bool HoistSpillHelper::rmFromMergeableSpills(MachineInstr &Spill,
                                             int StackSlot) {
  auto It = StackSlotToOrigLI.find(StackSlot);
  if (It == StackSlotToOrigLI.end())
    return false;
  SlotIndex Idx = LIS.getInstructionIndex(Spill);
  VNInfo *OrigVNI = It->second->getVNInfoAt(Idx.getRegSlot());
  std::pair<int, VNInfo *> MIdx = std::make_pair(StackSlot, OrigVNI);
  return MergeableSpills[MIdx].erase(&Spill);
}

/// Check BB to see if it is a possible target BB to place a hoisted spill,
/// i.e., there should be a living sibling of OrigReg at the insert point.
bool HoistSpillHelper::isSpillCandBB(LiveInterval &OrigLI, VNInfo &OrigVNI,
                                     MachineBasicBlock &BB, Register &LiveReg) {
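  // Compute the index of the last insert point in BB; a sibling register must
  // be live there for the hoisted spill to read it.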
  SlotIndex Idx;
  Register OrigReg = OrigLI.reg();
  MachineBasicBlock::iterator MI = IPA.getLastInsertPointIter(OrigLI, BB);
  if (MI != BB.end())
    Idx = LIS.getInstructionIndex(*MI);
  else
    Idx = LIS.getMBBEndIdx(&BB).getPrevSlot();
  SmallSetVector<Register, 16> &Siblings = Virt2SiblingsMap[OrigReg];
  assert(OrigLI.getVNInfoAt(Idx) == &OrigVNI && "Unexpected VNI");

  for (const Register &SibReg : Siblings) {
    LiveInterval &LI = LIS.getInterval(SibReg);
    VNInfo *VNI = LI.getVNInfoAt(Idx);
    if (VNI) {
      LiveReg = SibReg;
      return true;
    }
  }
  return false;
}

/// Remove redundant spills in the same BB. Save those redundant spills in
/// SpillsToRm, and save the spill to keep and its BB in SpillBBToSpill map.
void HoistSpillHelper::rmRedundantSpills(
    SmallPtrSet<MachineInstr *, 16> &Spills,
    SmallVectorImpl<MachineInstr *> &SpillsToRm,
    DenseMap<MachineDomTreeNode *, MachineInstr *> &SpillBBToSpill) {
  // For each spill seen, check SpillBBToSpill[] and see if its BB already has
  // another spill inside. If a BB contains more than one spill, only keep the
  // earlier spill with smaller SlotIndex.
  for (const auto CurrentSpill : Spills) {
    MachineBasicBlock *Block = CurrentSpill->getParent();
    MachineDomTreeNode *Node = MDT.getBase().getNode(Block);
    MachineInstr *PrevSpill = SpillBBToSpill[Node];
    if (PrevSpill) {
      SlotIndex PIdx = LIS.getInstructionIndex(*PrevSpill);
      SlotIndex CIdx = LIS.getInstructionIndex(*CurrentSpill);
      MachineInstr *SpillToRm = (CIdx > PIdx) ? CurrentSpill : PrevSpill;
      MachineInstr *SpillToKeep = (CIdx > PIdx) ? PrevSpill : CurrentSpill;
      SpillsToRm.push_back(SpillToRm);
      SpillBBToSpill[MDT.getBase().getNode(Block)] = SpillToKeep;
    } else {
      SpillBBToSpill[MDT.getBase().getNode(Block)] = CurrentSpill;
    }
  }
  for (const auto SpillToRm : SpillsToRm)
    Spills.erase(SpillToRm);
}

/// Starting from \p Root find a top-down traversal order of the dominator
/// tree to visit all basic blocks containing the elements of \p Spills.
/// Redundant spills will be found and put into \p SpillsToRm at the same
/// time. \p SpillBBToSpill will be populated as part of the process and
/// maps a basic block to the first store occurring in the basic block.
/// \post SpillsToRm.union(Spills\@post) == Spills\@pre
void HoistSpillHelper::getVisitOrders(
    MachineBasicBlock *Root, SmallPtrSet<MachineInstr *, 16> &Spills,
    SmallVectorImpl<MachineDomTreeNode *> &Orders,
    SmallVectorImpl<MachineInstr *> &SpillsToRm,
    DenseMap<MachineDomTreeNode *, unsigned> &SpillsToKeep,
    DenseMap<MachineDomTreeNode *, MachineInstr *> &SpillBBToSpill) {
  // The set contains all the possible BB nodes to which we may hoist
  // original spills.
  SmallPtrSet<MachineDomTreeNode *, 8> WorkSet;
  // Save the BB nodes on the path from the first BB node containing
  // non-redundant spill to the Root node.
  SmallPtrSet<MachineDomTreeNode *, 8> NodesOnPath;
  // All the spills to be hoisted must originate from a single def instruction
  // to the OrigReg. It means the def instruction should dominate all the spills
  // to be hoisted. We choose the BB where the def instruction is located as
  // the Root.
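  // Walking stops at Root's immediate dominator, so the loop below visits
  // Root itself as well.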
1315 MachineDomTreeNode *RootIDomNode = MDT[Root]->getIDom();
1316 // For every node on the dominator tree with spill, walk up on the dominator
1317 // tree towards the Root node until it is reached. If there is other node
1318 // containing spill in the middle of the path, the previous spill saw will
1319 // be redundant and the node containing it will be removed. All the nodes on
1320 // the path starting from the first node with non-redundant spill to the Root
1321 // node will be added to the WorkSet, which will contain all the possible
1322 // locations where spills may be hoisted to after the loop below is done.
1323 for (const auto Spill : Spills) {
1324 MachineBasicBlock *Block = Spill->getParent();
1325 MachineDomTreeNode *Node = MDT[Block];
1326 MachineInstr *SpillToRm = nullptr;
1327 while (Node != RootIDomNode) {
1328 // If Node dominates Block, and it already contains a spill, the spill in
1329 // Block will be redundant.
1330 if (Node != MDT[Block] && SpillBBToSpill[Node]) {
1331 SpillToRm = SpillBBToSpill[MDT[Block]];
1332 break;
1333 /// If we see the Node already in WorkSet, the path from the Node to
1334 /// the Root node must already be traversed by another spill.
1335 /// Then no need to repeat.
1336 } else if (WorkSet.count(Node)) {
1337 break;
1338 } else {
1339 NodesOnPath.insert(Node);
1340 }
1341 Node = Node->getIDom();
1342 }
1343 if (SpillToRm) {
1344 SpillsToRm.push_back(SpillToRm);
1345 } else {
1346 // Add a BB containing the original spills to SpillsToKeep -- i.e.,
1347 // set the initial status before hoisting start. The value of BBs
1348 // containing original spills is set to 0, in order to descriminate
1349 // with BBs containing hoisted spills which will be inserted to
1350 // SpillsToKeep later during hoisting.
1351 SpillsToKeep[MDT[Block]] = 0;
1352 WorkSet.insert(NodesOnPath.begin(), NodesOnPath.end());
1353 }
1354 NodesOnPath.clear();
1355 }
1356
1357 // Sort the nodes in WorkSet in top-down order and save the nodes
1358 // in Orders. Orders will be used for hoisting in runHoistSpills.
1359 unsigned idx = 0;
1360 Orders.push_back(MDT.getBase().getNode(Root));
1361 do {
1362 MachineDomTreeNode *Node = Orders[idx++];
1363 for (MachineDomTreeNode *Child : Node->children()) {
1364 if (WorkSet.count(Child))
1365 Orders.push_back(Child);
1366 }
1367 } while (idx != Orders.size());
1368 assert(Orders.size() == WorkSet.size() &&
1369 "Orders have different size with WorkSet");
1370
1371 #ifndef NDEBUG
1372 LLVM_DEBUG(dbgs() << "Orders size is " << Orders.size() << "\n");
1373 SmallVector<MachineDomTreeNode *, 32>::reverse_iterator RIt = Orders.rbegin();
1374 for (; RIt != Orders.rend(); RIt++)
1375 LLVM_DEBUG(dbgs() << "BB" << (*RIt)->getBlock()->getNumber() << ",");
1376 LLVM_DEBUG(dbgs() << "\n");
1377 #endif
1378 }
1379
1380 /// Try to hoist spills according to BB hotness. The spills to removed will
1381 /// be saved in \p SpillsToRm. The spills to be inserted will be saved in
1382 /// \p SpillsToIns.
runHoistSpills(LiveInterval & OrigLI,VNInfo & OrigVNI,SmallPtrSet<MachineInstr *,16> & Spills,SmallVectorImpl<MachineInstr * > & SpillsToRm,DenseMap<MachineBasicBlock *,unsigned> & SpillsToIns)1383 void HoistSpillHelper::runHoistSpills(
1384 LiveInterval &OrigLI, VNInfo &OrigVNI,
1385 SmallPtrSet<MachineInstr *, 16> &Spills,
1386 SmallVectorImpl<MachineInstr *> &SpillsToRm,
1387 DenseMap<MachineBasicBlock *, unsigned> &SpillsToIns) {
1388 // Visit order of dominator tree nodes.
1389 SmallVector<MachineDomTreeNode *, 32> Orders;
1390 // SpillsToKeep contains all the nodes where spills are to be inserted
1391 // during hoisting. If the spill to be inserted is an original spill
1392 // (not a hoisted one), the value of the map entry is 0. If the spill
1393 // is a hoisted spill, the value of the map entry is the VReg to be used
1394 // as the source of the spill.
  DenseMap<MachineDomTreeNode *, unsigned> SpillsToKeep;
  // Map from a dominator tree node to the first spill in its block.
  DenseMap<MachineDomTreeNode *, MachineInstr *> SpillBBToSpill;

  rmRedundantSpills(Spills, SpillsToRm, SpillBBToSpill);

  MachineBasicBlock *Root = LIS.getMBBFromIndex(OrigVNI.def);
  getVisitOrders(Root, Spills, Orders, SpillsToRm, SpillsToKeep,
                 SpillBBToSpill);

  // SpillsInSubTreeMap maps a dom tree node to a pair of a node set and the
  // total cost of all the spills in those nodes. The node set holds the
  // locations where spills are to be inserted in the subtree of the current
  // node.
  using NodesCostPair =
      std::pair<SmallPtrSet<MachineDomTreeNode *, 16>, BlockFrequency>;
  DenseMap<MachineDomTreeNode *, NodesCostPair> SpillsInSubTreeMap;

  // Iterate over Orders in reverse, which yields a bottom-up order in the
  // dominator tree. Once we visit a dom tree node, we know its children have
  // already been visited and the spill locations in the subtrees of all the
  // children have been determined.
  SmallVector<MachineDomTreeNode *, 32>::reverse_iterator RIt = Orders.rbegin();
  for (; RIt != Orders.rend(); ++RIt) {
    MachineBasicBlock *Block = (*RIt)->getBlock();

    // If Block contains an original spill, simply continue.
    if (SpillsToKeep.find(*RIt) != SpillsToKeep.end() && !SpillsToKeep[*RIt]) {
      SpillsInSubTreeMap[*RIt].first.insert(*RIt);
      // SpillsInSubTreeMap[*RIt].second contains the cost of the spill.
      SpillsInSubTreeMap[*RIt].second = MBFI.getBlockFreq(Block);
      continue;
    }

    // Collect spills in the subtree of the current node (*RIt) into
    // SpillsInSubTreeMap[*RIt].first.
    for (MachineDomTreeNode *Child : (*RIt)->children()) {
      if (SpillsInSubTreeMap.find(Child) == SpillsInSubTreeMap.end())
        continue;
      // The reference "SpillsInSubTree" below must be bound before taking the
      // begin and end iterators of SpillsInSubTreeMap[Child].first: accessing
      // SpillsInSubTreeMap[*RIt] for the first time may grow the map and move
      // its buckets, invalidating those iterators.
      SmallPtrSet<MachineDomTreeNode *, 16> &SpillsInSubTree =
          SpillsInSubTreeMap[*RIt].first;
      BlockFrequency &SubTreeCost = SpillsInSubTreeMap[*RIt].second;
      SubTreeCost += SpillsInSubTreeMap[Child].second;
      auto BI = SpillsInSubTreeMap[Child].first.begin();
      auto EI = SpillsInSubTreeMap[Child].first.end();
      SpillsInSubTree.insert(BI, EI);
      SpillsInSubTreeMap.erase(Child);
    }

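    // Bind (or default-construct) the entry for the current node; if the
    // child loop above never executed, this creates an empty node set with a
    // zero cost.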
    SmallPtrSet<MachineDomTreeNode *, 16> &SpillsInSubTree =
        SpillsInSubTreeMap[*RIt].first;
    BlockFrequency &SubTreeCost = SpillsInSubTreeMap[*RIt].second;
    // No spills in subtree, simply continue.
    if (SpillsInSubTree.empty())
      continue;

    // Check whether Block is a possible candidate for inserting a spill.
    Register LiveReg;
    if (!isSpillCandBB(OrigLI, OrigVNI, *Block, LiveReg))
      continue;

    // If there are multiple spills that could be merged, bias slightly
    // toward hoisting the spill.
    BranchProbability MarginProb = (SpillsInSubTree.size() > 1)
                                       ? BranchProbability(9, 10)
                                       : BranchProbability(1, 1);
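    // Illustrative (hypothetical frequencies): two subtree spills in blocks
    // of frequency 30 each give SubTreeCost = 60, so hoisting into a Block of
    // frequency 50 passes the test below (60 > 50 * 9/10 = 45). A single
    // spill of frequency 40 would stay put (40 is not > 50 * 1/1 = 50).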
    if (SubTreeCost > MBFI.getBlockFreq(Block) * MarginProb) {
      // Hoist: Move spills to current Block.
      for (const auto SpillBB : SpillsInSubTree) {
        // When SpillBB contains an original spill, add that spill to
        // SpillsToRm.
        if (SpillsToKeep.find(SpillBB) != SpillsToKeep.end() &&
            !SpillsToKeep[SpillBB]) {
          MachineInstr *SpillToRm = SpillBBToSpill[SpillBB];
          SpillsToRm.push_back(SpillToRm);
        }
        // SpillBB will not contain a spill anymore, so remove it from
        // SpillsToKeep.
        SpillsToKeep.erase(SpillBB);
      }
      // Current Block is the BB containing the new hoisted spill. Add it to
      // SpillsToKeep. LiveReg is the source of the new spill.
      SpillsToKeep[*RIt] = LiveReg;
      LLVM_DEBUG({
        dbgs() << "spills in BB: ";
        for (const auto Rspill : SpillsInSubTree)
          dbgs() << Rspill->getBlock()->getNumber() << " ";
        dbgs() << "were promoted to BB" << (*RIt)->getBlock()->getNumber()
               << "\n";
      });
      SpillsInSubTree.clear();
      SpillsInSubTree.insert(*RIt);
      SubTreeCost = MBFI.getBlockFreq(Block);
    }
  }
  // For spills in SpillsToKeep with LiveReg set (i.e., not an original
  // spill), save them to SpillsToIns.
  for (const auto &Ent : SpillsToKeep) {
    if (Ent.second)
      SpillsToIns[Ent.first->getBlock()] = Ent.second;
  }
}

/// For spills with equal values, remove redundant spills and hoist those left
/// to less hot spots.
///
/// Spills with equal values will be collected into the same set in
/// MergeableSpills when the spill is inserted. These equal spills originate
/// from the same defining instruction and are dominated by the instruction.
/// Before hoisting all the equal spills, redundant spills inside the same
/// BB are first marked to be deleted. Then, starting from the spills left,
/// walk up the dominator tree towards the Root node where the defining
/// instruction is located, mark the dominated spills to be deleted along the
/// way, and collect the BB nodes on the path from non-dominated spills to the
/// defining instruction into a WorkSet. The nodes in WorkSet are the
/// candidate places where we consider hoisting the spills. We iterate the
/// WorkSet in bottom-up order, and for each node we decide whether to hoist
/// the spills inside its subtree to that node. In this way, we can get a
/// local benefit even if hoisting all the equal spills to one cold place is
/// impossible.
void HoistSpillHelper::hoistAllSpills() {
  SmallVector<Register, 4> NewVRegs;
  LiveRangeEdit Edit(nullptr, NewVRegs, MF, LIS, &VRM, this);

  for (unsigned i = 0, e = MRI.getNumVirtRegs(); i != e; ++i) {
    Register Reg = Register::index2VirtReg(i);
    Register Original = VRM.getPreSplitReg(Reg);
    if (!MRI.def_empty(Reg))
      Virt2SiblingsMap[Original].insert(Reg);
  }
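  // Virt2SiblingsMap now groups every virtual register that still has a
  // definition under its pre-split original register, so the registers split
  // from the same original can be enumerated when looking for a sibling that
  // is live at a candidate hoisting point.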

  // Each entry in MergeableSpills contains a spill set with equal values.
  for (auto &Ent : MergeableSpills) {
    int Slot = Ent.first.first;
    LiveInterval &OrigLI = *StackSlotToOrigLI[Slot];
    VNInfo *OrigVNI = Ent.first.second;
    SmallPtrSet<MachineInstr *, 16> &EqValSpills = Ent.second;
    if (Ent.second.empty())
      continue;

    LLVM_DEBUG({
      dbgs() << "\nFor Slot" << Slot << " and VN" << OrigVNI->id << ":\n"
             << "Equal spills in BB: ";
      for (const auto spill : EqValSpills)
        dbgs() << spill->getParent()->getNumber() << " ";
      dbgs() << "\n";
    });

    // SpillsToRm is the spill set to be removed from EqValSpills.
    SmallVector<MachineInstr *, 16> SpillsToRm;
    // SpillsToIns is the spill set to be newly inserted after hoisting.
    DenseMap<MachineBasicBlock *, unsigned> SpillsToIns;

    runHoistSpills(OrigLI, *OrigVNI, EqValSpills, SpillsToRm, SpillsToIns);

    LLVM_DEBUG({
      dbgs() << "Finally inserted spills in BB: ";
      for (const auto &Ispill : SpillsToIns)
        dbgs() << Ispill.first->getNumber() << " ";
      dbgs() << "\nFinally removed spills in BB: ";
      for (const auto Rspill : SpillsToRm)
        dbgs() << Rspill->getParent()->getNumber() << " ";
      dbgs() << "\n";
    });

    // Stack live range update.
    LiveInterval &StackIntvl = LSS.getInterval(Slot);
    if (!SpillsToIns.empty() || !SpillsToRm.empty())
      StackIntvl.MergeValueInAsValue(OrigLI, OrigVNI,
                                     StackIntvl.getValNumInfo(0));
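    // Merging OrigVNI's live range into the stack interval keeps the stack
    // slot marked live across the region now covered by the moved spills.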

    // Insert hoisted spills.
    for (auto const &Insert : SpillsToIns) {
      MachineBasicBlock *BB = Insert.first;
      Register LiveReg = Insert.second;
      MachineBasicBlock::iterator MII = IPA.getLastInsertPointIter(OrigLI, *BB);
      MachineInstrSpan MIS(MII, BB);
      TII.storeRegToStackSlot(*BB, MII, LiveReg, false, Slot,
                              MRI.getRegClass(LiveReg), &TRI);
      LIS.InsertMachineInstrRangeInMaps(MIS.begin(), MII);
      for (const MachineInstr &MI : make_range(MIS.begin(), MII))
        getVDefInterval(MI, LIS);
      ++NumSpills;
    }

    // Remove redundant spills or change them to dead instructions.
    NumSpills -= SpillsToRm.size();
    for (auto const RMEnt : SpillsToRm) {
      RMEnt->setDesc(TII.get(TargetOpcode::KILL));
      for (unsigned i = RMEnt->getNumOperands(); i; --i) {
        MachineOperand &MO = RMEnt->getOperand(i - 1);
        if (MO.isReg() && MO.isImplicit() && MO.isDef() && !MO.isDead())
          RMEnt->RemoveOperand(i - 1);
      }
    }
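    // The redundant spills were rewritten into KILLs with their live implicit
    // defs stripped above; let LiveRangeEdit delete them as dead defs and
    // update the affected live intervals.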
    Edit.eliminateDeadDefs(SpillsToRm, None, AA);
  }
}

/// For a VirtReg clone, the \p New register should have the same physreg or
/// stackslot as the \p Old register.
void HoistSpillHelper::LRE_DidCloneVirtReg(Register New, Register Old) {
  if (VRM.hasPhys(Old))
    VRM.assignVirt2Phys(New, VRM.getPhys(Old));
  else if (VRM.getStackSlot(Old) != VirtRegMap::NO_STACK_SLOT)
    VRM.assignVirt2StackSlot(New, VRM.getStackSlot(Old));
  else
    llvm_unreachable("VReg should be assigned either physreg or stackslot");
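  // Shapes are only assigned on targets with shaped register files (e.g. the
  // X86 AMX tile registers), so copy the shape when the old register has one.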
  if (VRM.hasShape(Old))
    VRM.assignVirt2Shape(New, VRM.getShape(Old));
}
