1 //===-- LiveIntervalAnalysis.cpp - Live Interval Analysis -----------------===//
2 //
3 //                     The LLVM Compiler Infrastructure
4 //
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
7 //
8 //===----------------------------------------------------------------------===//
9 //
10 // This file implements the LiveInterval analysis pass which is used
11 // by the Linear Scan Register allocator. This pass linearizes the
12 // basic blocks of the function in DFS order and uses the
13 // LiveVariables pass to conservatively compute live intervals for
14 // each virtual and physical register.
15 //
16 //===----------------------------------------------------------------------===//
17 
18 #define DEBUG_TYPE "liveintervals"
19 #include "llvm/CodeGen/LiveIntervalAnalysis.h"
20 #include "VirtRegMap.h"
21 #include "llvm/Value.h"
22 #include "llvm/Analysis/AliasAnalysis.h"
23 #include "llvm/CodeGen/LiveVariables.h"
24 #include "llvm/CodeGen/MachineFrameInfo.h"
25 #include "llvm/CodeGen/MachineInstr.h"
26 #include "llvm/CodeGen/MachineInstrBuilder.h"
27 #include "llvm/CodeGen/MachineLoopInfo.h"
28 #include "llvm/CodeGen/MachineMemOperand.h"
29 #include "llvm/CodeGen/MachineRegisterInfo.h"
30 #include "llvm/CodeGen/Passes.h"
31 #include "llvm/CodeGen/ProcessImplicitDefs.h"
32 #include "llvm/Target/TargetRegisterInfo.h"
33 #include "llvm/Target/TargetInstrInfo.h"
34 #include "llvm/Target/TargetMachine.h"
35 #include "llvm/Target/TargetOptions.h"
36 #include "llvm/Support/CommandLine.h"
37 #include "llvm/Support/Debug.h"
38 #include "llvm/Support/ErrorHandling.h"
39 #include "llvm/Support/raw_ostream.h"
40 #include "llvm/ADT/DepthFirstIterator.h"
41 #include "llvm/ADT/SmallSet.h"
42 #include "llvm/ADT/Statistic.h"
43 #include "llvm/ADT/STLExtras.h"
44 #include <algorithm>
45 #include <limits>
46 #include <cmath>
47 using namespace llvm;
48 
// Hidden command-line options to help with debugging.
static cl::opt<bool> DisableReMat("disable-rematerialization",
                                  cl::init(false), cl::Hidden);

// Pass statistics, reported when -stats is given.
STATISTIC(numIntervals , "Number of original intervals");
STATISTIC(numFolds     , "Number of loads/stores folded into instructions");
STATISTIC(numSplits    , "Number of intervals split");

// Unique pass identifier; its address (not value) identifies the pass.
char LiveIntervals::ID = 0;
INITIALIZE_PASS(LiveIntervals, "liveintervals",
                "Live Interval Analysis", false, false);
60 
// getAnalysisUsage - Declare the analyses this pass requires and preserves.
// Interval computation only annotates instructions with slot indexes; the
// CFG itself is never modified.
void LiveIntervals::getAnalysisUsage(AnalysisUsage &AU) const {
  AU.setPreservesCFG();
  AU.addRequired<AliasAnalysis>();
  AU.addPreserved<AliasAnalysis>();
  AU.addRequired<LiveVariables>();
  AU.addPreserved<LiveVariables>();
  AU.addRequired<MachineLoopInfo>();
  AU.addPreserved<MachineLoopInfo>();
  AU.addPreservedID(MachineDominatorsID);

  // When StrongPHIElimination is enabled, PHI nodes are handled by that
  // pass instead, so only depend on classic PHI elimination when it's off.
  if (!StrongPHIElim) {
    AU.addPreservedID(PHIEliminationID);
    AU.addRequiredID(PHIEliminationID);
  }

  AU.addRequiredID(TwoAddressInstructionPassID);
  AU.addPreserved<ProcessImplicitDefs>();
  AU.addRequired<ProcessImplicitDefs>();
  AU.addPreserved<SlotIndexes>();
  // Transitive: passes that use LiveIntervals also need SlotIndexes alive.
  AU.addRequiredTransitive<SlotIndexes>();
  MachineFunctionPass::getAnalysisUsage(AU);
}
83 
releaseMemory()84 void LiveIntervals::releaseMemory() {
85   // Free the live intervals themselves.
86   for (DenseMap<unsigned, LiveInterval*>::iterator I = r2iMap_.begin(),
87        E = r2iMap_.end(); I != E; ++I)
88     delete I->second;
89 
90   r2iMap_.clear();
91 
92   // Release VNInfo memory regions, VNInfo objects don't need to be dtor'd.
93   VNInfoAllocator.Reset();
94   while (!CloneMIs.empty()) {
95     MachineInstr *MI = CloneMIs.back();
96     CloneMIs.pop_back();
97     mf_->DeleteMachineInstr(MI);
98   }
99 }
100 
101 /// runOnMachineFunction - Register allocate the whole function
102 ///
runOnMachineFunction(MachineFunction & fn)103 bool LiveIntervals::runOnMachineFunction(MachineFunction &fn) {
104   mf_ = &fn;
105   mri_ = &mf_->getRegInfo();
106   tm_ = &fn.getTarget();
107   tri_ = tm_->getRegisterInfo();
108   tii_ = tm_->getInstrInfo();
109   aa_ = &getAnalysis<AliasAnalysis>();
110   lv_ = &getAnalysis<LiveVariables>();
111   indexes_ = &getAnalysis<SlotIndexes>();
112   allocatableRegs_ = tri_->getAllocatableSet(fn);
113 
114   computeIntervals();
115 
116   numIntervals += getNumIntervals();
117 
118   DEBUG(dump());
119   return true;
120 }
121 
122 /// print - Implement the dump method.
print(raw_ostream & OS,const Module *) const123 void LiveIntervals::print(raw_ostream &OS, const Module* ) const {
124   OS << "********** INTERVALS **********\n";
125   for (const_iterator I = begin(), E = end(); I != E; ++I) {
126     I->second->print(OS, tri_);
127     OS << "\n";
128   }
129 
130   printInstrs(OS);
131 }
132 
printInstrs(raw_ostream & OS) const133 void LiveIntervals::printInstrs(raw_ostream &OS) const {
134   OS << "********** MACHINEINSTRS **********\n";
135 
136   for (MachineFunction::iterator mbbi = mf_->begin(), mbbe = mf_->end();
137        mbbi != mbbe; ++mbbi) {
138     OS << "BB#" << mbbi->getNumber()
139        << ":\t\t# derived from " << mbbi->getName() << "\n";
140     for (MachineBasicBlock::iterator mii = mbbi->begin(),
141            mie = mbbi->end(); mii != mie; ++mii) {
142       if (mii->isDebugValue())
143         OS << "    \t" << *mii;
144       else
145         OS << getInstructionIndex(mii) << '\t' << *mii;
146     }
147   }
148 }
149 
dumpInstrs() const150 void LiveIntervals::dumpInstrs() const {
151   printInstrs(dbgs());
152 }
153 
/// conflictsWithPhysReg - Return true if assigning physical register 'reg'
/// to the interval 'li' (given the current virt->phys assignments in 'vrm')
/// could conflict with another use of 'reg' inside li's single live range.
/// Conservatively returns true for anything it does not analyze.
bool LiveIntervals::conflictsWithPhysReg(const LiveInterval &li,
                                         VirtRegMap &vrm, unsigned reg) {
  // We don't handle fancy stuff crossing basic block boundaries
  if (li.ranges.size() != 1)
    return true;
  const LiveRange &range = li.ranges.front();
  // [idx, end) in whole-instruction (base index) granularity.
  SlotIndex idx = range.start.getBaseIndex();
  SlotIndex end = range.end.getPrevSlot().getBaseIndex().getNextIndex();

  // Skip deleted instructions
  MachineInstr *firstMI = getInstructionFromIndex(idx);
  while (!firstMI && idx != end) {
    idx = idx.getNextIndex();
    firstMI = getInstructionFromIndex(idx);
  }
  // Only deleted instructions in the range: nothing can conflict.
  if (!firstMI)
    return false;

  // Find last instruction in range
  SlotIndex lastIdx = end.getPrevIndex();
  MachineInstr *lastMI = getInstructionFromIndex(lastIdx);
  while (!lastMI && lastIdx != idx) {
    lastIdx = lastIdx.getPrevIndex();
    lastMI = getInstructionFromIndex(lastIdx);
  }
  if (!lastMI)
    return false;

  // Range cannot cross basic block boundaries or terminators
  MachineBasicBlock *MBB = firstMI->getParent();
  if (MBB != lastMI->getParent() || lastMI->getDesc().isTerminator())
    return true;

  // Walk every instruction in [firstMI, lastMI] looking for a register
  // operand that would overlap 'reg' once virtuals are mapped through vrm.
  MachineBasicBlock::const_iterator E = lastMI;
  ++E;
  for (MachineBasicBlock::const_iterator I = firstMI; I != E; ++I) {
    const MachineInstr &MI = *I;

    // Allow copies to and from li.reg
    if (MI.isCopy())
      if (MI.getOperand(0).getReg() == li.reg ||
          MI.getOperand(1).getReg() == li.reg)
        continue;

    // Check for operands using reg
    for (unsigned i = 0, e = MI.getNumOperands(); i != e;  ++i) {
      const MachineOperand& mop = MI.getOperand(i);
      if (!mop.isReg())
        continue;
      unsigned PhysReg = mop.getReg();
      // li.reg itself is obviously fine; so is the null register.
      if (PhysReg == 0 || PhysReg == li.reg)
        continue;
      if (TargetRegisterInfo::isVirtualRegister(PhysReg)) {
        // Unassigned virtuals cannot conflict with a physreg yet.
        if (!vrm.hasPhys(PhysReg))
          continue;
        PhysReg = vrm.getPhys(PhysReg);
      }
      // Overlap includes aliases/sub-registers of 'reg'.
      if (PhysReg && tri_->regsOverlap(PhysReg, reg))
        return true;
    }
  }

  // No conflicts found.
  return false;
}
219 
/// conflictsWithAliasRef - Return true if any instruction covered by li's
/// live ranges (excluding deleted instructions and instructions in
/// JoinedCopies) has an explicit physical register operand that overlaps
/// 'Reg' (i.e. an alias or sub/super-register reference).
bool LiveIntervals::conflictsWithAliasRef(LiveInterval &li, unsigned Reg,
                                  SmallPtrSet<MachineInstr*,32> &JoinedCopies) {
  for (LiveInterval::Ranges::const_iterator
         I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
    // Step through the range one whole instruction (base index) at a time.
    for (SlotIndex index = I->start.getBaseIndex(),
           end = I->end.getPrevSlot().getBaseIndex().getNextIndex();
           index != end;
           index = index.getNextIndex()) {
      MachineInstr *MI = getInstructionFromIndex(index);
      if (!MI)
        continue;               // skip deleted instructions

      // Copies that are being coalesced away cannot introduce a conflict.
      if (JoinedCopies.count(MI))
        continue;
      for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
        MachineOperand& MO = MI->getOperand(i);
        if (!MO.isReg())
          continue;
        unsigned PhysReg = MO.getReg();
        // Only other physical registers can alias Reg.
        if (PhysReg == 0 || PhysReg == Reg ||
            TargetRegisterInfo::isVirtualRegister(PhysReg))
          continue;
        if (tri_->regsOverlap(Reg, PhysReg))
          return true;
      }
    }
  }

  return false;
}
250 
251 #ifndef NDEBUG
printRegName(unsigned reg,const TargetRegisterInfo * tri_)252 static void printRegName(unsigned reg, const TargetRegisterInfo* tri_) {
253   if (TargetRegisterInfo::isPhysicalRegister(reg))
254     dbgs() << tri_->getName(reg);
255   else
256     dbgs() << "%reg" << reg;
257 }
258 #endif
259 
260 static
MultipleDefsBySameMI(const MachineInstr & MI,unsigned MOIdx)261 bool MultipleDefsBySameMI(const MachineInstr &MI, unsigned MOIdx) {
262   unsigned Reg = MI.getOperand(MOIdx).getReg();
263   for (unsigned i = MOIdx+1, e = MI.getNumOperands(); i < e; ++i) {
264     const MachineOperand &MO = MI.getOperand(i);
265     if (!MO.isReg())
266       continue;
267     if (MO.getReg() == Reg && MO.isDef()) {
268       assert(MI.getOperand(MOIdx).getSubReg() != MO.getSubReg() &&
269              MI.getOperand(MOIdx).getSubReg() &&
270              (MO.getSubReg() || MO.isImplicit()));
271       return true;
272     }
273   }
274   return false;
275 }
276 
277 /// isPartialRedef - Return true if the specified def at the specific index is
278 /// partially re-defining the specified live interval. A common case of this is
279 /// a definition of the sub-register.
isPartialRedef(SlotIndex MIIdx,MachineOperand & MO,LiveInterval & interval)280 bool LiveIntervals::isPartialRedef(SlotIndex MIIdx, MachineOperand &MO,
281                                    LiveInterval &interval) {
282   if (!MO.getSubReg() || MO.isEarlyClobber())
283     return false;
284 
285   SlotIndex RedefIndex = MIIdx.getDefIndex();
286   const LiveRange *OldLR =
287     interval.getLiveRangeContaining(RedefIndex.getUseIndex());
288   if (OldLR->valno->isDefAccurate()) {
289     MachineInstr *DefMI = getInstructionFromIndex(OldLR->valno->def);
290     return DefMI->findRegisterDefOperandIdx(interval.reg) != -1;
291   }
292   return false;
293 }
294 
/// handleVirtualRegisterDef - Update 'interval' for the definition of a
/// virtual register by operand MOIdx of instruction mi at index MIIdx.
/// The first def of a vreg builds its whole interval from LiveVariables
/// kill information; later defs (phi/2-addr/partial redefs) split or
/// extend the existing interval.
void LiveIntervals::handleVirtualRegisterDef(MachineBasicBlock *mbb,
                                             MachineBasicBlock::iterator mi,
                                             SlotIndex MIIdx,
                                             MachineOperand& MO,
                                             unsigned MOIdx,
                                             LiveInterval &interval) {
  DEBUG({
      dbgs() << "\t\tregister: ";
      printRegName(interval.reg, tri_);
    });

  // Virtual registers may be defined multiple times (due to phi
  // elimination and 2-addr elimination).  Much of what we do only has to be
  // done once for the vreg.  We use an empty interval to detect the first
  // time we see a vreg.
  LiveVariables::VarInfo& vi = lv_->getVarInfo(interval.reg);
  if (interval.empty()) {
    // Get the Idx of the defining instructions.
    SlotIndex defIndex = MIIdx.getDefIndex();
    // Earlyclobbers move back one, so that they overlap the live range
    // of inputs.
    if (MO.isEarlyClobber())
      defIndex = MIIdx.getUseIndex();

    // Make sure the first definition is not a partial redefinition. Add an
    // <imp-def> of the full register.
    if (MO.getSubReg())
      mi->addRegisterDefined(interval.reg);

    // Remember the defining copy (if any) so coalescing can find it later.
    MachineInstr *CopyMI = NULL;
    if (mi->isCopyLike()) {
      CopyMI = mi;
    }

    VNInfo *ValNo = interval.getNextValue(defIndex, CopyMI, true,
                                          VNInfoAllocator);
    assert(ValNo->id == 0 && "First value in interval is not 0?");

    // Loop over all of the blocks that the vreg is defined in.  There are
    // two cases we have to handle here.  The most common case is a vreg
    // whose lifetime is contained within a basic block.  In this case there
    // will be a single kill, in MBB, which comes after the definition.
    if (vi.Kills.size() == 1 && vi.Kills[0]->getParent() == mbb) {
      // FIXME: what about dead vars?
      SlotIndex killIdx;
      if (vi.Kills[0] != mi)
        killIdx = getInstructionIndex(vi.Kills[0]).getDefIndex();
      else
        // Defining instruction is also the kill: a dead def gets a
        // one-slot range ending at the store slot.
        killIdx = defIndex.getStoreIndex();

      // If the kill happens after the definition, we have an intra-block
      // live range.
      if (killIdx > defIndex) {
        assert(vi.AliveBlocks.empty() &&
               "Shouldn't be alive across any blocks!");
        LiveRange LR(defIndex, killIdx, ValNo);
        interval.addRange(LR);
        DEBUG(dbgs() << " +" << LR << "\n");
        return;
      }
    }

    // The other case we handle is when a virtual register lives to the end
    // of the defining block, potentially live across some blocks, then is
    // live into some number of blocks, but gets killed.  Start by adding a
    // range that goes from this definition to the end of the defining block.
    LiveRange NewLR(defIndex, getMBBEndIdx(mbb), ValNo);
    DEBUG(dbgs() << " +" << NewLR);
    interval.addRange(NewLR);

    bool PHIJoin = lv_->isPHIJoin(interval.reg);

    if (PHIJoin) {
      // A phi join register is killed at the end of the MBB and revived as a new
      // valno in the killing blocks.
      assert(vi.AliveBlocks.empty() && "Phi join can't pass through blocks");
      DEBUG(dbgs() << " phi-join");
      ValNo->setHasPHIKill(true);
    } else {
      // Iterate over all of the blocks that the variable is completely
      // live in, adding [insrtIndex(begin), instrIndex(end)+4) to the
      // live interval.
      for (SparseBitVector<>::iterator I = vi.AliveBlocks.begin(),
               E = vi.AliveBlocks.end(); I != E; ++I) {
        MachineBasicBlock *aliveBlock = mf_->getBlockNumbered(*I);
        LiveRange LR(getMBBStartIdx(aliveBlock), getMBBEndIdx(aliveBlock), ValNo);
        interval.addRange(LR);
        DEBUG(dbgs() << " +" << LR);
      }
    }

    // Finally, this virtual register is live from the start of any killing
    // block to the 'use' slot of the killing instruction.
    for (unsigned i = 0, e = vi.Kills.size(); i != e; ++i) {
      MachineInstr *Kill = vi.Kills[i];
      SlotIndex Start = getMBBStartIdx(Kill->getParent());
      SlotIndex killIdx = getInstructionIndex(Kill).getDefIndex();

      // Create interval with one of a NEW value number.  Note that this value
      // number isn't actually defined by an instruction, weird huh? :)
      if (PHIJoin) {
        ValNo = interval.getNextValue(SlotIndex(Start, true), 0, false,
                                      VNInfoAllocator);
        ValNo->setIsPHIDef(true);
      }
      LiveRange LR(Start, killIdx, ValNo);
      interval.addRange(LR);
      DEBUG(dbgs() << " +" << LR);
    }

  } else {
    if (MultipleDefsBySameMI(*mi, MOIdx))
      // Multiple defs of the same virtual register by the same instruction.
      // e.g. %reg1031:5<def>, %reg1031:6<def> = VLD1q16 %reg1024<kill>, ...
      // This is likely due to elimination of REG_SEQUENCE instructions. Return
      // here since there is nothing to do.
      return;

    // If this is the second time we see a virtual register definition, it
    // must be due to phi elimination or two addr elimination.  If this is
    // the result of two address elimination, then the vreg is one of the
    // def-and-use register operand.

    // It may also be partial redef like this:
    // 80  %reg1041:6<def> = VSHRNv4i16 %reg1034<kill>, 12, pred:14, pred:%reg0
    // 120 %reg1041:5<def> = VSHRNv4i16 %reg1039<kill>, 12, pred:14, pred:%reg0
    bool PartReDef = isPartialRedef(MIIdx, MO, interval);
    if (PartReDef || mi->isRegTiedToUseOperand(MOIdx)) {
      // If this is a two-address definition, then we have already processed
      // the live range.  The only problem is that we didn't realize there
      // are actually two values in the live interval.  Because of this we
      // need to take the LiveRegion that defines this register and split it
      // into two values.
      SlotIndex RedefIndex = MIIdx.getDefIndex();
      if (MO.isEarlyClobber())
        RedefIndex = MIIdx.getUseIndex();

      const LiveRange *OldLR =
        interval.getLiveRangeContaining(RedefIndex.getUseIndex());
      VNInfo *OldValNo = OldLR->valno;
      SlotIndex DefIndex = OldValNo->def.getDefIndex();

      // Delete the previous value, which should be short and continuous,
      // because the 2-addr copy must be in the same MBB as the redef.
      interval.removeRange(DefIndex, RedefIndex);

      // The new value number (#1) is defined by the instruction we claimed
      // defined value #0.  Its flags are copied from the old value below,
      // since getNextValue() cannot set them directly.
      VNInfo *ValNo = interval.getNextValue(OldValNo->def, OldValNo->getCopy(),
                                            false, // flags copied below
                                            VNInfoAllocator);
      ValNo->setFlags(OldValNo->getFlags()); // inherit old value's flags

      // Value#0 is now defined by the 2-addr instruction.
      OldValNo->def  = RedefIndex;
      OldValNo->setCopy(0);

      // A re-def may be a copy. e.g. %reg1030:6<def> = VMOVD %reg1026, ...
      if (PartReDef && mi->isCopyLike())
        OldValNo->setCopy(&*mi);

      // Add the new live interval which replaces the range for the input copy.
      LiveRange LR(DefIndex, RedefIndex, ValNo);
      DEBUG(dbgs() << " replace range with " << LR);
      interval.addRange(LR);

      // If this redefinition is dead, we need to add a dummy unit live
      // range covering the def slot.
      if (MO.isDead())
        interval.addRange(LiveRange(RedefIndex, RedefIndex.getStoreIndex(),
                                    OldValNo));

      DEBUG({
          dbgs() << " RESULT: ";
          interval.print(dbgs(), tri_);
        });
    } else if (lv_->isPHIJoin(interval.reg)) {
      // In the case of PHI elimination, each variable definition is only
      // live until the end of the block.  We've already taken care of the
      // rest of the live range.

      SlotIndex defIndex = MIIdx.getDefIndex();
      if (MO.isEarlyClobber())
        defIndex = MIIdx.getUseIndex();

      VNInfo *ValNo;
      MachineInstr *CopyMI = NULL;
      if (mi->isCopyLike())
        CopyMI = mi;
      ValNo = interval.getNextValue(defIndex, CopyMI, true, VNInfoAllocator);

      // The value lives from this def to the end of the defining block only.
      SlotIndex killIndex = getMBBEndIdx(mbb);
      LiveRange LR(defIndex, killIndex, ValNo);
      interval.addRange(LR);
      ValNo->setHasPHIKill(true);
      DEBUG(dbgs() << " phi-join +" << LR);
    } else {
      llvm_unreachable("Multiply defined register");
    }
  }

  DEBUG(dbgs() << '\n');
}
498 
/// handlePhysicalRegisterDef - Update 'interval' for a def of a physical
/// register at MIIdx. Scans forward within the defining block for the kill
/// or redef that ends the range; CopyMI (may be null) records the defining
/// copy for later coalescing.
void LiveIntervals::handlePhysicalRegisterDef(MachineBasicBlock *MBB,
                                              MachineBasicBlock::iterator mi,
                                              SlotIndex MIIdx,
                                              MachineOperand& MO,
                                              LiveInterval &interval,
                                              MachineInstr *CopyMI) {
  // A physical register cannot be live across basic block, so its
  // lifetime must end somewhere in its defining basic block.
  DEBUG({
      dbgs() << "\t\tregister: ";
      printRegName(interval.reg, tri_);
    });

  SlotIndex baseIndex = MIIdx;
  SlotIndex start = baseIndex.getDefIndex();
  // Earlyclobbers move back one.
  if (MO.isEarlyClobber())
    start = MIIdx.getUseIndex();
  SlotIndex end = start;

  // If it is not used after definition, it is considered dead at
  // the instruction defining it. Hence its interval is:
  // [defSlot(def), defSlot(def)+1)
  // For earlyclobbers, the defSlot was pushed back one; the extra
  // advance below compensates.
  if (MO.isDead()) {
    DEBUG(dbgs() << " dead");
    end = start.getStoreIndex();
    goto exit;
  }

  // If it is not dead on definition, it must be killed by a
  // subsequent instruction. Hence its interval is:
  // [defSlot(def), useSlot(kill)+1)
  baseIndex = baseIndex.getNextIndex();
  while (++mi != MBB->end()) {

    // DBG_VALUEs occupy no slot; skip without advancing baseIndex.
    if (mi->isDebugValue())
      continue;
    // Resynchronize baseIndex if instructions were deleted at this slot.
    if (getInstructionFromIndex(baseIndex) == 0)
      baseIndex = indexes_->getNextNonNullIndex(baseIndex);

    if (mi->killsRegister(interval.reg, tri_)) {
      DEBUG(dbgs() << " killed");
      end = baseIndex.getDefIndex();
      goto exit;
    } else {
      int DefIdx = mi->findRegisterDefOperandIdx(interval.reg,false,false,tri_);
      if (DefIdx != -1) {
        if (mi->isRegTiedToUseOperand(DefIdx)) {
          // Two-address instruction.
          end = baseIndex.getDefIndex();
        } else {
          // Another instruction redefines the register before it is ever read.
          // Then the register is essentially dead at the instruction that
          // defines it. Hence its interval is:
          // [defSlot(def), defSlot(def)+1)
          DEBUG(dbgs() << " dead");
          end = start.getStoreIndex();
        }
        goto exit;
      }
    }

    baseIndex = baseIndex.getNextIndex();
  }

  // We can reach here with no killing or redefining instruction only when
  // the physreg is live-in to the function and never used, or when its
  // implicit use has been deleted by the two-address pass. Either way it
  // is dead at its def.
  end = start.getStoreIndex();

exit:
  assert(start < end && "did not find end of interval?");

  // Already exists? Extend old live interval.
  LiveInterval::iterator OldLR = interval.FindLiveRangeContaining(start);
  bool Extend = OldLR != interval.end();
  VNInfo *ValNo = Extend
    ? OldLR->valno : interval.getNextValue(start, CopyMI, true, VNInfoAllocator);
  if (MO.isEarlyClobber() && Extend)
    ValNo->setHasRedefByEC(true);
  LiveRange LR(start, end, ValNo);
  interval.addRange(LR);
  DEBUG(dbgs() << " +" << LR << '\n');
}
586 
handleRegisterDef(MachineBasicBlock * MBB,MachineBasicBlock::iterator MI,SlotIndex MIIdx,MachineOperand & MO,unsigned MOIdx)587 void LiveIntervals::handleRegisterDef(MachineBasicBlock *MBB,
588                                       MachineBasicBlock::iterator MI,
589                                       SlotIndex MIIdx,
590                                       MachineOperand& MO,
591                                       unsigned MOIdx) {
592   if (TargetRegisterInfo::isVirtualRegister(MO.getReg()))
593     handleVirtualRegisterDef(MBB, MI, MIIdx, MO, MOIdx,
594                              getOrCreateInterval(MO.getReg()));
595   else if (allocatableRegs_[MO.getReg()]) {
596     MachineInstr *CopyMI = NULL;
597     if (MI->isCopyLike())
598       CopyMI = MI;
599     handlePhysicalRegisterDef(MBB, MI, MIIdx, MO,
600                               getOrCreateInterval(MO.getReg()), CopyMI);
601     // Def of a register also defines its sub-registers.
602     for (const unsigned* AS = tri_->getSubRegisters(MO.getReg()); *AS; ++AS)
603       // If MI also modifies the sub-register explicitly, avoid processing it
604       // more than once. Do not pass in TRI here so it checks for exact match.
605       if (!MI->definesRegister(*AS))
606         handlePhysicalRegisterDef(MBB, MI, MIIdx, MO,
607                                   getOrCreateInterval(*AS), 0);
608   }
609 }
610 
/// handleLiveInRegister - Create a live range for a register that is live-in
/// to MBB, from the block start to its first kill/redef (or through the
/// whole block if neither is found). 'isAlias' marks registers that are
/// live-in only as an alias of a real live-in; those are treated as dead if
/// never referenced.
void LiveIntervals::handleLiveInRegister(MachineBasicBlock *MBB,
                                         SlotIndex MIIdx,
                                         LiveInterval &interval, bool isAlias) {
  DEBUG({
      dbgs() << "\t\tlivein register: ";
      printRegName(interval.reg, tri_);
    });

  // Look for kills, if it reaches a def before it's killed, then it shouldn't
  // be considered a livein.
  MachineBasicBlock::iterator mi = MBB->begin();
  MachineBasicBlock::iterator E = MBB->end();
  // Skip over DBG_VALUE at the start of the MBB.
  if (mi != E && mi->isDebugValue()) {
    while (++mi != E && mi->isDebugValue())
      ;
    if (mi == E)
      // MBB is empty except for DBG_VALUE's.
      return;
  }

  SlotIndex baseIndex = MIIdx;
  SlotIndex start = baseIndex;
  // Resynchronize if the slot at the block start holds no instruction.
  if (getInstructionFromIndex(baseIndex) == 0)
    baseIndex = indexes_->getNextNonNullIndex(baseIndex);

  SlotIndex end = baseIndex;
  bool SeenDefUse = false;

  while (mi != E) {
    if (mi->killsRegister(interval.reg, tri_)) {
      DEBUG(dbgs() << " killed");
      end = baseIndex.getDefIndex();
      SeenDefUse = true;
      break;
    } else if (mi->definesRegister(interval.reg, tri_)) {
      // Another instruction redefines the register before it is ever read.
      // Then the register is essentially dead at the instruction that defines
      // it. Hence its interval is:
      // [defSlot(def), defSlot(def)+1)
      DEBUG(dbgs() << " dead");
      end = start.getStoreIndex();
      SeenDefUse = true;
      break;
    }

    while (++mi != E && mi->isDebugValue())
      // Skip over DBG_VALUE.
      ;
    // Only advance the index for real instructions.
    if (mi != E)
      baseIndex = indexes_->getNextNonNullIndex(baseIndex);
  }

  // Live-in register might not be used at all.
  if (!SeenDefUse) {
    if (isAlias) {
      // Alias live-ins with no reference are dead at the block start.
      DEBUG(dbgs() << " dead");
      end = MIIdx.getStoreIndex();
    } else {
      // Genuine live-ins with no kill live through the whole block.
      DEBUG(dbgs() << " live through");
      end = baseIndex;
    }
  }

  // The value is defined "at the PHI slot" of the block start: it has no
  // defining instruction in this function view.
  VNInfo *vni =
    interval.getNextValue(SlotIndex(getMBBStartIdx(MBB), true),
                          0, false, VNInfoAllocator);
  vni->setIsPHIDef(true);
  LiveRange LR(start, end, vni);

  interval.addRange(LR);
  DEBUG(dbgs() << " +" << LR << '\n');
}
684 
/// computeIntervals - computes the live intervals for virtual
/// registers. for some ordering of the machine instructions [1,N] a
/// live interval is an interval [i, j) where 1 <= i <= j < N for
/// which a variable is live
void LiveIntervals::computeIntervals() {
  DEBUG(dbgs() << "********** COMPUTING LIVE INTERVALS **********\n"
               << "********** Function: "
               << ((Value*)mf_->getFunction())->getName() << '\n');

  // Registers only referenced by <undef> uses; they get empty intervals.
  SmallVector<unsigned, 8> UndefUses;
  for (MachineFunction::iterator MBBI = mf_->begin(), E = mf_->end();
       MBBI != E; ++MBBI) {
    MachineBasicBlock *MBB = MBBI;
    if (MBB->empty())
      continue;

    // Track the index of the current machine instr.
    SlotIndex MIIndex = getMBBStartIdx(MBB);
    DEBUG(dbgs() << "BB#" << MBB->getNumber()
          << ":\t\t# derived from " << MBB->getName() << "\n");

    // Create intervals for live-ins to this BB first.
    for (MachineBasicBlock::livein_iterator LI = MBB->livein_begin(),
           LE = MBB->livein_end(); LI != LE; ++LI) {
      handleLiveInRegister(MBB, MIIndex, getOrCreateInterval(*LI));
      // Multiple live-ins can alias the same register.
      for (const unsigned* AS = tri_->getSubRegisters(*LI); *AS; ++AS)
        if (!hasInterval(*AS))
          handleLiveInRegister(MBB, MIIndex, getOrCreateInterval(*AS),
                               true);
    }

    // Skip over empty initial indices.
    if (getInstructionFromIndex(MIIndex) == 0)
      MIIndex = indexes_->getNextNonNullIndex(MIIndex);

    for (MachineBasicBlock::iterator MI = MBB->begin(), miEnd = MBB->end();
         MI != miEnd; ++MI) {
      DEBUG(dbgs() << MIIndex << "\t" << *MI);
      // DBG_VALUEs have no slot index: skip without advancing MIIndex.
      if (MI->isDebugValue())
        continue;

      // Handle defs.  Iterate backwards so explicit defs (which precede
      // implicit operands) are processed after any implicit ones.
      for (int i = MI->getNumOperands() - 1; i >= 0; --i) {
        MachineOperand &MO = MI->getOperand(i);
        if (!MO.isReg() || !MO.getReg())
          continue;

        // handle register defs - build intervals
        if (MO.isDef())
          handleRegisterDef(MBB, MI, MIIndex, MO, i);
        else if (MO.isUndef())
          UndefUses.push_back(MO.getReg());
      }

      // Move to the next instr slot.
      MIIndex = indexes_->getNextNonNullIndex(MIIndex);
    }
  }

  // Create empty intervals for registers defined by implicit_def's (except
  // for those implicit_def that define values which are liveout of their
  // blocks).
  for (unsigned i = 0, e = UndefUses.size(); i != e; ++i) {
    unsigned UndefReg = UndefUses[i];
    (void)getOrCreateInterval(UndefReg);
  }
}
753 
createInterval(unsigned reg)754 LiveInterval* LiveIntervals::createInterval(unsigned reg) {
755   float Weight = TargetRegisterInfo::isPhysicalRegister(reg) ? HUGE_VALF : 0.0F;
756   return new LiveInterval(reg, Weight);
757 }
758 
759 /// dupInterval - Duplicate a live interval. The caller is responsible for
760 /// managing the allocated memory.
dupInterval(LiveInterval * li)761 LiveInterval* LiveIntervals::dupInterval(LiveInterval *li) {
762   LiveInterval *NewLI = createInterval(li->reg);
763   NewLI->Copy(*li, mri_, getVNInfoAllocator());
764   return NewLI;
765 }
766 
767 //===----------------------------------------------------------------------===//
768 // Register allocator hooks.
769 //
770 
771 /// getReMatImplicitUse - If the remat definition MI has one (for now, we only
772 /// allow one) virtual register operand, then its uses are implicitly using
773 /// the register. Returns the virtual register.
getReMatImplicitUse(const LiveInterval & li,MachineInstr * MI) const774 unsigned LiveIntervals::getReMatImplicitUse(const LiveInterval &li,
775                                             MachineInstr *MI) const {
776   unsigned RegOp = 0;
777   for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
778     MachineOperand &MO = MI->getOperand(i);
779     if (!MO.isReg() || !MO.isUse())
780       continue;
781     unsigned Reg = MO.getReg();
782     if (Reg == 0 || Reg == li.reg)
783       continue;
784 
785     if (TargetRegisterInfo::isPhysicalRegister(Reg) &&
786         !allocatableRegs_[Reg])
787       continue;
788     // FIXME: For now, only remat MI with at most one register operand.
789     assert(!RegOp &&
790            "Can't rematerialize instruction with multiple register operand!");
791     RegOp = MO.getReg();
792 #ifndef NDEBUG
793     break;
794 #endif
795   }
796   return RegOp;
797 }
798 
799 /// isValNoAvailableAt - Return true if the val# of the specified interval
800 /// which reaches the given instruction also reaches the specified use index.
isValNoAvailableAt(const LiveInterval & li,MachineInstr * MI,SlotIndex UseIdx) const801 bool LiveIntervals::isValNoAvailableAt(const LiveInterval &li, MachineInstr *MI,
802                                        SlotIndex UseIdx) const {
803   SlotIndex Index = getInstructionIndex(MI);
804   VNInfo *ValNo = li.FindLiveRangeContaining(Index)->valno;
805   LiveInterval::const_iterator UI = li.FindLiveRangeContaining(UseIdx);
806   return UI != li.end() && UI->valno == ValNo;
807 }
808 
809 /// isReMaterializable - Returns true if the definition MI of the specified
810 /// val# of the specified interval is re-materializable.
bool LiveIntervals::isReMaterializable(const LiveInterval &li,
                                       const VNInfo *ValNo, MachineInstr *MI,
                                       SmallVectorImpl<LiveInterval*> &SpillIs,
                                       bool &isLoad) {
  // Rematerialization can be disabled entirely from the command line.
  if (DisableReMat)
    return false;

  // The target decides whether MI's result can be recomputed cheaply and
  // without side effects (aa_ is consulted for load safety).
  if (!tii_->isTriviallyReMaterializable(MI, aa_))
    return false;

  // Target-specific code can mark an instruction as being rematerializable
  // if it has one virtual reg use, though it had better be something like
  // a PIC base register which is likely to be live everywhere.
  unsigned ImpUse = getReMatImplicitUse(li, MI);
  if (ImpUse) {
    const LiveInterval &ImpLi = getInterval(ImpUse);
    // Every (non-debug) use of li.reg that is reached by this val# must also
    // be reached by the same value of the implicitly-used register ImpUse;
    // otherwise rematerializing at that use would read the wrong value.
    for (MachineRegisterInfo::use_nodbg_iterator
           ri = mri_->use_nodbg_begin(li.reg), re = mri_->use_nodbg_end();
         ri != re; ++ri) {
      MachineInstr *UseMI = &*ri;
      SlotIndex UseIdx = getInstructionIndex(UseMI);
      // NOTE(review): assumes every use index is inside a live range of li —
      // FindLiveRangeContaining is dereferenced unchecked here; verify for
      // undef uses.
      if (li.FindLiveRangeContaining(UseIdx)->valno != ValNo)
        continue;
      if (!isValNoAvailableAt(ImpLi, MI, UseIdx))
        return false;
    }

    // If a register operand of the re-materialized instruction is going to
    // be spilled next, then it's not legal to re-materialize this instruction.
    for (unsigned i = 0, e = SpillIs.size(); i != e; ++i)
      if (ImpUse == SpillIs[i]->reg)
        return false;
  }
  return true;
}
846 
847 /// isReMaterializable - Returns true if the definition MI of the specified
848 /// val# of the specified interval is re-materializable.
isReMaterializable(const LiveInterval & li,const VNInfo * ValNo,MachineInstr * MI)849 bool LiveIntervals::isReMaterializable(const LiveInterval &li,
850                                        const VNInfo *ValNo, MachineInstr *MI) {
851   SmallVector<LiveInterval*, 4> Dummy1;
852   bool Dummy2;
853   return isReMaterializable(li, ValNo, MI, Dummy1, Dummy2);
854 }
855 
856 /// isReMaterializable - Returns true if every definition of MI of every
857 /// val# of the specified interval is re-materializable.
isReMaterializable(const LiveInterval & li,SmallVectorImpl<LiveInterval * > & SpillIs,bool & isLoad)858 bool LiveIntervals::isReMaterializable(const LiveInterval &li,
859                                        SmallVectorImpl<LiveInterval*> &SpillIs,
860                                        bool &isLoad) {
861   isLoad = false;
862   for (LiveInterval::const_vni_iterator i = li.vni_begin(), e = li.vni_end();
863        i != e; ++i) {
864     const VNInfo *VNI = *i;
865     if (VNI->isUnused())
866       continue; // Dead val#.
867     // Is the def for the val# rematerializable?
868     if (!VNI->isDefAccurate())
869       return false;
870     MachineInstr *ReMatDefMI = getInstructionFromIndex(VNI->def);
871     bool DefIsLoad = false;
872     if (!ReMatDefMI ||
873         !isReMaterializable(li, VNI, ReMatDefMI, SpillIs, DefIsLoad))
874       return false;
875     isLoad |= DefIsLoad;
876   }
877   return true;
878 }
879 
880 /// FilterFoldedOps - Filter out two-address use operands. Return
881 /// true if it finds any issue with the operands that ought to prevent
882 /// folding.
FilterFoldedOps(MachineInstr * MI,SmallVector<unsigned,2> & Ops,unsigned & MRInfo,SmallVector<unsigned,2> & FoldOps)883 static bool FilterFoldedOps(MachineInstr *MI,
884                             SmallVector<unsigned, 2> &Ops,
885                             unsigned &MRInfo,
886                             SmallVector<unsigned, 2> &FoldOps) {
887   MRInfo = 0;
888   for (unsigned i = 0, e = Ops.size(); i != e; ++i) {
889     unsigned OpIdx = Ops[i];
890     MachineOperand &MO = MI->getOperand(OpIdx);
891     // FIXME: fold subreg use.
892     if (MO.getSubReg())
893       return true;
894     if (MO.isDef())
895       MRInfo |= (unsigned)VirtRegMap::isMod;
896     else {
897       // Filter out two-address use operand(s).
898       if (MI->isRegTiedToDefOperand(OpIdx)) {
899         MRInfo = VirtRegMap::isModRef;
900         continue;
901       }
902       MRInfo |= (unsigned)VirtRegMap::isRef;
903     }
904     FoldOps.push_back(OpIdx);
905   }
906   return false;
907 }
908 
909 
910 /// tryFoldMemoryOperand - Attempts to fold either a spill / restore from
911 /// slot / to reg or any rematerialized load into ith operand of specified
912 /// MI. If it is successul, MI is updated with the newly created MI and
913 /// returns true.
bool LiveIntervals::tryFoldMemoryOperand(MachineInstr* &MI,
                                         VirtRegMap &vrm, MachineInstr *DefMI,
                                         SlotIndex InstrIdx,
                                         SmallVector<unsigned, 2> &Ops,
                                         bool isSS, int Slot, unsigned Reg) {
  // If it is an implicit def instruction, just delete it.
  if (MI->isImplicitDef()) {
    // Keep the slot-index map and the virt-reg map consistent before
    // physically erasing the instruction.
    RemoveMachineInstrFromMaps(MI);
    vrm.RemoveMachineInstrFromMaps(MI);
    MI->eraseFromParent();
    ++numFolds;
    return true;
  }

  // Filter the list of operand indexes that are to be folded. Abort if
  // any operand will prevent folding.
  unsigned MRInfo = 0;
  SmallVector<unsigned, 2> FoldOps;
  if (FilterFoldedOps(MI, Ops, MRInfo, FoldOps))
    return false;

  // The only time it's safe to fold into a two address instruction is when
  // it's folding reload and spill from / into a spill stack slot.
  if (DefMI && (MRInfo & VirtRegMap::isMod))
    return false;

  // Ask the target to fold either the stack slot (spill/reload) or the
  // rematerialized def instruction into MI.
  MachineInstr *fmi = isSS ? tii_->foldMemoryOperand(MI, FoldOps, Slot)
                           : tii_->foldMemoryOperand(MI, FoldOps, DefMI);
  if (fmi) {
    // Remember this instruction uses the spill slot.
    if (isSS) vrm.addSpillSlotUse(Slot, fmi);

    // Attempt to fold the memory reference into the instruction. If
    // we can do this, we don't need to insert spill code.
    if (isSS && !mf_->getFrameInfo()->isImmutableObjectIndex(Slot))
      vrm.virtFolded(Reg, MI, fmi, (VirtRegMap::ModRef)MRInfo);
    // Migrate all bookkeeping attached to the old instruction onto the new
    // folded one before MI is destroyed.
    vrm.transferSpillPts(MI, fmi);
    vrm.transferRestorePts(MI, fmi);
    vrm.transferEmergencySpills(MI, fmi);
    ReplaceMachineInstrInMaps(MI, fmi);
    MI->eraseFromParent();
    // MI is an in/out parameter: the caller continues with the folded instr.
    MI = fmi;
    ++numFolds;
    return true;
  }
  return false;
}
961 
962 /// canFoldMemoryOperand - Returns true if the specified load / store
963 /// folding is possible.
canFoldMemoryOperand(MachineInstr * MI,SmallVector<unsigned,2> & Ops,bool ReMat) const964 bool LiveIntervals::canFoldMemoryOperand(MachineInstr *MI,
965                                          SmallVector<unsigned, 2> &Ops,
966                                          bool ReMat) const {
967   // Filter the list of operand indexes that are to be folded. Abort if
968   // any operand will prevent folding.
969   unsigned MRInfo = 0;
970   SmallVector<unsigned, 2> FoldOps;
971   if (FilterFoldedOps(MI, Ops, MRInfo, FoldOps))
972     return false;
973 
974   // It's only legal to remat for a use, not a def.
975   if (ReMat && (MRInfo & VirtRegMap::isMod))
976     return false;
977 
978   return tii_->canFoldMemoryOperand(MI, FoldOps);
979 }
980 
intervalIsInOneMBB(const LiveInterval & li) const981 bool LiveIntervals::intervalIsInOneMBB(const LiveInterval &li) const {
982   LiveInterval::Ranges::const_iterator itr = li.ranges.begin();
983 
984   MachineBasicBlock *mbb =  indexes_->getMBBCoveringRange(itr->start, itr->end);
985 
986   if (mbb == 0)
987     return false;
988 
989   for (++itr; itr != li.ranges.end(); ++itr) {
990     MachineBasicBlock *mbb2 =
991       indexes_->getMBBCoveringRange(itr->start, itr->end);
992 
993     if (mbb2 != mbb)
994       return false;
995   }
996 
997   return true;
998 }
999 
1000 /// rewriteImplicitOps - Rewrite implicit use operands of MI (i.e. uses of
1001 /// interval on to-be re-materialized operands of MI) with new register.
rewriteImplicitOps(const LiveInterval & li,MachineInstr * MI,unsigned NewVReg,VirtRegMap & vrm)1002 void LiveIntervals::rewriteImplicitOps(const LiveInterval &li,
1003                                        MachineInstr *MI, unsigned NewVReg,
1004                                        VirtRegMap &vrm) {
1005   // There is an implicit use. That means one of the other operand is
1006   // being remat'ed and the remat'ed instruction has li.reg as an
1007   // use operand. Make sure we rewrite that as well.
1008   for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
1009     MachineOperand &MO = MI->getOperand(i);
1010     if (!MO.isReg())
1011       continue;
1012     unsigned Reg = MO.getReg();
1013     if (Reg == 0 || TargetRegisterInfo::isPhysicalRegister(Reg))
1014       continue;
1015     if (!vrm.isReMaterialized(Reg))
1016       continue;
1017     MachineInstr *ReMatMI = vrm.getReMaterializedMI(Reg);
1018     MachineOperand *UseMO = ReMatMI->findRegisterUseOperand(li.reg);
1019     if (UseMO)
1020       UseMO->setReg(NewVReg);
1021   }
1022 }
1023 
1024 /// rewriteInstructionForSpills, rewriteInstructionsForSpills - Helper functions
1025 /// for addIntervalsForSpills to rewrite uses / defs for the given live range.
bool LiveIntervals::
rewriteInstructionForSpills(const LiveInterval &li, const VNInfo *VNI,
                 bool TrySplit, SlotIndex index, SlotIndex end,
                 MachineInstr *MI,
                 MachineInstr *ReMatOrigDefMI, MachineInstr *ReMatDefMI,
                 unsigned Slot, int LdSlot,
                 bool isLoad, bool isLoadSS, bool DefIsReMat, bool CanDelete,
                 VirtRegMap &vrm,
                 const TargetRegisterClass* rc,
                 SmallVector<int, 4> &ReMatIds,
                 const MachineLoopInfo *loopInfo,
                 unsigned &NewVReg, unsigned ImpUse, bool &HasDef, bool &HasUse,
                 DenseMap<unsigned,unsigned> &MBBVRegsMap,
                 std::vector<LiveInterval*> &NewLIs) {
  // Rewrite all operands of MI referring to li.reg to use NewVReg (created
  // on demand), folding spills/reloads/remat defs where legal. Returns true
  // if every rewritten operand could also be folded; HasDef/HasUse report
  // whether MI writes and/or reads the register after rewriting.
  bool CanFold = false;
 RestartInstruction:
  for (unsigned i = 0; i != MI->getNumOperands(); ++i) {
    MachineOperand& mop = MI->getOperand(i);
    if (!mop.isReg())
      continue;
    unsigned Reg = mop.getReg();
    // Only the virtual register being spilled is of interest here.
    if (Reg == 0 || TargetRegisterInfo::isPhysicalRegister(Reg))
      continue;
    if (Reg != li.reg)
      continue;

    bool TryFold = !DefIsReMat;
    bool FoldSS = true; // Default behavior unless it's a remat.
    int FoldSlot = Slot;
    if (DefIsReMat) {
      // If this is the rematerializable definition MI itself and
      // all of its uses are rematerialized, simply delete it.
      if (MI == ReMatOrigDefMI && CanDelete) {
        DEBUG(dbgs() << "\t\t\t\tErasing re-materializable def: "
                     << *MI << '\n');
        RemoveMachineInstrFromMaps(MI);
        vrm.RemoveMachineInstrFromMaps(MI);
        MI->eraseFromParent();
        break;
      }

      // If def for this use can't be rematerialized, then try folding.
      // If def is rematerializable and it's a load, also try folding.
      TryFold = !ReMatDefMI || (ReMatDefMI && (MI == ReMatOrigDefMI || isLoad));
      if (isLoad) {
        // Try fold loads (from stack slot, constant pool, etc.) into uses.
        FoldSS = isLoadSS;
        FoldSlot = LdSlot;
      }
    }

    // Scan all of the operands of this instruction rewriting operands
    // to use NewVReg instead of li.reg as appropriate.  We do this for
    // two reasons:
    //
    //   1. If the instr reads the same spilled vreg multiple times, we
    //      want to reuse the NewVReg.
    //   2. If the instr is a two-addr instruction, we are required to
    //      keep the src/dst regs pinned.
    //
    // Keep track of whether we replace a use and/or def so that we can
    // create the spill interval with the appropriate range.
    SmallVector<unsigned, 2> Ops;
    tie(HasUse, HasDef) = MI->readsWritesVirtualRegister(Reg, &Ops);

    // Create a new virtual register for the spill interval.
    // Create the new register now so we can map the fold instruction
    // to the new register so when it is unfolded we get the correct
    // answer.
    bool CreatedNewVReg = false;
    if (NewVReg == 0) {
      NewVReg = mri_->createVirtualRegister(rc);
      vrm.grow();
      CreatedNewVReg = true;

      // The new virtual register should get the same allocation hints as the
      // old one.
      std::pair<unsigned, unsigned> Hint = mri_->getRegAllocationHint(Reg);
      if (Hint.first || Hint.second)
        mri_->setRegAllocationHint(NewVReg, Hint.first, Hint.second);
    }

    if (!TryFold)
      CanFold = false;
    else {
      // Do not fold load / store here if we are splitting. We'll find an
      // optimal point to insert a load / store later.
      if (!TrySplit) {
        if (tryFoldMemoryOperand(MI, vrm, ReMatDefMI, index,
                                 Ops, FoldSS, FoldSlot, NewVReg)) {
          // Folding the load/store can completely change the instruction in
          // unpredictable ways, rescan it from the beginning.

          if (FoldSS) {
            // We need to give the new vreg the same stack slot as the
            // spilled interval.
            vrm.assignVirt2StackSlot(NewVReg, FoldSlot);
          }

          HasUse = false;
          HasDef = false;
          CanFold = false;
          // The fold may have deleted MI entirely (e.g. a folded remat def);
          // in that case it is no longer in the index maps and we are done.
          if (isNotInMIMap(MI))
            break;
          goto RestartInstruction;
        }
      } else {
        // We'll try to fold it later if it's profitable.
        CanFold = canFoldMemoryOperand(MI, Ops, DefIsReMat);
      }
    }

    mop.setReg(NewVReg);
    if (mop.isImplicit())
      rewriteImplicitOps(li, MI, NewVReg, vrm);

    // Reuse NewVReg for other reads.
    for (unsigned j = 0, e = Ops.size(); j != e; ++j) {
      MachineOperand &mopj = MI->getOperand(Ops[j]);
      mopj.setReg(NewVReg);
      if (mopj.isImplicit())
        rewriteImplicitOps(li, MI, NewVReg, vrm);
    }

    if (CreatedNewVReg) {
      if (DefIsReMat) {
        vrm.setVirtIsReMaterialized(NewVReg, ReMatDefMI);
        if (ReMatIds[VNI->id] == VirtRegMap::MAX_STACK_SLOT) {
          // Each valnum may have its own remat id.
          ReMatIds[VNI->id] = vrm.assignVirtReMatId(NewVReg);
        } else {
          vrm.assignVirtReMatId(NewVReg, ReMatIds[VNI->id]);
        }
        if (!CanDelete || (HasUse && HasDef)) {
          // If this is a two-addr instruction then its use operands are
          // rematerializable but its def is not. It should be assigned a
          // stack slot.
          vrm.assignVirt2StackSlot(NewVReg, Slot);
        }
      } else {
        vrm.assignVirt2StackSlot(NewVReg, Slot);
      }
    } else if (HasUse && HasDef &&
               vrm.getStackSlot(NewVReg) == VirtRegMap::NO_STACK_SLOT) {
      // If this interval hasn't been assigned a stack slot (because earlier
      // def is a deleted remat def), do it now.
      assert(Slot != VirtRegMap::NO_STACK_SLOT);
      vrm.assignVirt2StackSlot(NewVReg, Slot);
    }

    // Re-matting an instruction with virtual register use. Add the
    // register as an implicit use on the use MI.
    if (DefIsReMat && ImpUse)
      MI->addOperand(MachineOperand::CreateReg(ImpUse, false, true));

    // Create a new register interval for this spill / remat.
    LiveInterval &nI = getOrCreateInterval(NewVReg);
    if (CreatedNewVReg) {
      NewLIs.push_back(&nI);
      MBBVRegsMap.insert(std::make_pair(MI->getParent()->getNumber(), NewVReg));
      if (TrySplit)
        vrm.setIsSplitFromReg(NewVReg, li.reg);
    }

    if (HasUse) {
      if (CreatedNewVReg) {
        // Fresh interval: the reload covers [load slot, def slot).
        LiveRange LR(index.getLoadIndex(), index.getDefIndex(),
                     nI.getNextValue(SlotIndex(), 0, false, VNInfoAllocator));
        DEBUG(dbgs() << " +" << LR);
        nI.addRange(LR);
      } else {
        // Extend the split live interval to this def / use.
        SlotIndex End = index.getDefIndex();
        LiveRange LR(nI.ranges[nI.ranges.size()-1].end, End,
                     nI.getValNumInfo(nI.getNumValNums()-1));
        DEBUG(dbgs() << " +" << LR);
        nI.addRange(LR);
      }
    }
    if (HasDef) {
      // The def live range covers [def slot, store slot).
      LiveRange LR(index.getDefIndex(), index.getStoreIndex(),
                   nI.getNextValue(SlotIndex(), 0, false, VNInfoAllocator));
      DEBUG(dbgs() << " +" << LR);
      nI.addRange(LR);
    }

    DEBUG({
        dbgs() << "\t\t\t\tAdded new interval: ";
        nI.print(dbgs(), tri_);
        dbgs() << '\n';
      });
  }
  return CanFold;
}
anyKillInMBBAfterIdx(const LiveInterval & li,const VNInfo * VNI,MachineBasicBlock * MBB,SlotIndex Idx) const1220 bool LiveIntervals::anyKillInMBBAfterIdx(const LiveInterval &li,
1221                                    const VNInfo *VNI,
1222                                    MachineBasicBlock *MBB,
1223                                    SlotIndex Idx) const {
1224   return li.killedInRange(Idx.getNextSlot(), getMBBEndIdx(MBB));
1225 }
1226 
1227 /// RewriteInfo - Keep track of machine instrs that will be rewritten
1228 /// during spilling.
1229 namespace {
1230   struct RewriteInfo {
1231     SlotIndex Index;
1232     MachineInstr *MI;
RewriteInfo__anond98bf3590111::RewriteInfo1233     RewriteInfo(SlotIndex i, MachineInstr *mi) : Index(i), MI(mi) {}
1234   };
1235 
1236   struct RewriteInfoCompare {
operator ()__anond98bf3590111::RewriteInfoCompare1237     bool operator()(const RewriteInfo &LHS, const RewriteInfo &RHS) const {
1238       return LHS.Index < RHS.Index;
1239     }
1240   };
1241 }
1242 
void LiveIntervals::
rewriteInstructionsForSpills(const LiveInterval &li, bool TrySplit,
                    LiveInterval::Ranges::const_iterator &I,
                    MachineInstr *ReMatOrigDefMI, MachineInstr *ReMatDefMI,
                    unsigned Slot, int LdSlot,
                    bool isLoad, bool isLoadSS, bool DefIsReMat, bool CanDelete,
                    VirtRegMap &vrm,
                    const TargetRegisterClass* rc,
                    SmallVector<int, 4> &ReMatIds,
                    const MachineLoopInfo *loopInfo,
                    BitVector &SpillMBBs,
                    DenseMap<unsigned, std::vector<SRInfo> > &SpillIdxes,
                    BitVector &RestoreMBBs,
                    DenseMap<unsigned, std::vector<SRInfo> > &RestoreIdxes,
                    DenseMap<unsigned,unsigned> &MBBVRegsMap,
                    std::vector<LiveInterval*> &NewLIs) {
  // Rewrite every def/use of li.reg within the live range *I, creating new
  // intervals (NewLIs) and recording per-block spill (SpillIdxes/SpillMBBs)
  // and restore (RestoreIdxes/RestoreMBBs) points for later insertion.
  bool AllCanFold = true;
  unsigned NewVReg = 0;
  SlotIndex start = I->start.getBaseIndex();
  // Round the exclusive range end up to the next base index so the
  // half-open [start, end) test below covers the last instruction.
  SlotIndex end = I->end.getPrevSlot().getBaseIndex().getNextIndex();

  // First collect all the def / use in this live range that will be rewritten.
  // Make sure they are sorted according to instruction index.
  std::vector<RewriteInfo> RewriteMIs;
  for (MachineRegisterInfo::reg_iterator ri = mri_->reg_begin(li.reg),
         re = mri_->reg_end(); ri != re; ) {
    MachineInstr *MI = &*ri;
    MachineOperand &O = ri.getOperand();
    // Advance before any possible erase invalidates the iterator's position.
    ++ri;
    if (MI->isDebugValue()) {
      // Modify DBG_VALUE now that the value is in a spill slot.
      if (Slot != VirtRegMap::MAX_STACK_SLOT || isLoadSS) {
        uint64_t Offset = MI->getOperand(1).getImm();
        const MDNode *MDPtr = MI->getOperand(2).getMetadata();
        DebugLoc DL = MI->getDebugLoc();
        int FI = isLoadSS ? LdSlot : (int)Slot;
        if (MachineInstr *NewDV = tii_->emitFrameIndexDebugValue(*mf_, FI,
                                                           Offset, MDPtr, DL)) {
          DEBUG(dbgs() << "Modifying debug info due to spill:" << "\t" << *MI);
          ReplaceMachineInstrInMaps(MI, NewDV);
          MachineBasicBlock *MBB = MI->getParent();
          MBB->insert(MBB->erase(MI), NewDV);
          continue;
        }
      }

      // If the target can't express a frame-index DBG_VALUE, drop the debug
      // info rather than keep it pointing at a dead register.
      DEBUG(dbgs() << "Removing debug info due to spill:" << "\t" << *MI);
      RemoveMachineInstrFromMaps(MI);
      vrm.RemoveMachineInstrFromMaps(MI);
      MI->eraseFromParent();
      continue;
    }
    assert(!(O.isImplicit() && O.isUse()) &&
           "Spilling register that's used as implicit use?");
    SlotIndex index = getInstructionIndex(MI);
    // Only rewrite instructions inside this live range.
    if (index < start || index >= end)
      continue;

    if (O.isUndef())
      // Must be defined by an implicit def. It should not be spilled. Note,
      // this is for correctness reason. e.g.
      // 8   %reg1024<def> = IMPLICIT_DEF
      // 12  %reg1024<def> = INSERT_SUBREG %reg1024<kill>, %reg1025, 2
      // The live range [12, 14) are not part of the r1024 live interval since
      // it's defined by an implicit def. It will not conflicts with live
      // interval of r1025. Now suppose both registers are spilled, you can
      // easily see a situation where both registers are reloaded before
      // the INSERT_SUBREG and both target registers that would overlap.
      continue;
    RewriteMIs.push_back(RewriteInfo(index, MI));
  }
  std::sort(RewriteMIs.begin(), RewriteMIs.end(), RewriteInfoCompare());

  unsigned ImpUse = DefIsReMat ? getReMatImplicitUse(li, ReMatDefMI) : 0;
  // Now rewrite the defs and uses.
  for (unsigned i = 0, e = RewriteMIs.size(); i != e; ) {
    RewriteInfo &rwi = RewriteMIs[i];
    ++i;
    SlotIndex index = rwi.Index;
    MachineInstr *MI = rwi.MI;
    // If MI def and/or use the same register multiple times, then there
    // are multiple entries.
    while (i != e && RewriteMIs[i].MI == MI) {
      assert(RewriteMIs[i].Index == index);
      ++i;
    }
    MachineBasicBlock *MBB = MI->getParent();

    if (ImpUse && MI != ReMatDefMI) {
      // Re-matting an instruction with virtual register use. Prevent interval
      // from being spilled.
      getInterval(ImpUse).markNotSpillable();
    }

    unsigned MBBId = MBB->getNumber();
    unsigned ThisVReg = 0;
    if (TrySplit) {
      // When splitting, reuse the vreg already assigned to this block if any.
      DenseMap<unsigned,unsigned>::iterator NVI = MBBVRegsMap.find(MBBId);
      if (NVI != MBBVRegsMap.end()) {
        ThisVReg = NVI->second;
        // One common case:
        // x = use
        // ...
        // ...
        // def = ...
        //     = use
        // It's better to start a new interval to avoid artifically
        // extend the new interval.
        if (MI->readsWritesVirtualRegister(li.reg) ==
            std::make_pair(false,true)) {
          MBBVRegsMap.erase(MBB->getNumber());
          ThisVReg = 0;
        }
      }
    }

    bool IsNew = ThisVReg == 0;
    if (IsNew) {
      // This ends the previous live interval. If all of its def / use
      // can be folded, give it a low spill weight.
      if (NewVReg && TrySplit && AllCanFold) {
        LiveInterval &nI = getOrCreateInterval(NewVReg);
        nI.weight /= 10.0F;
      }
      AllCanFold = true;
    }
    NewVReg = ThisVReg;

    bool HasDef = false;
    bool HasUse = false;
    bool CanFold = rewriteInstructionForSpills(li, I->valno, TrySplit,
                         index, end, MI, ReMatOrigDefMI, ReMatDefMI,
                         Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
                         CanDelete, vrm, rc, ReMatIds, loopInfo, NewVReg,
                         ImpUse, HasDef, HasUse, MBBVRegsMap, NewLIs);
    // Nothing rewritten (e.g. the instruction was deleted): no bookkeeping.
    if (!HasDef && !HasUse)
      continue;

    AllCanFold &= CanFold;

    // Update weight of spill interval.
    LiveInterval &nI = getOrCreateInterval(NewVReg);
    if (!TrySplit) {
      // The spill weight is now infinity as it cannot be spilled again.
      nI.markNotSpillable();
      continue;
    }

    // Keep track of the last def and first use in each MBB.
    if (HasDef) {
      if (MI != ReMatOrigDefMI || !CanDelete) {
        bool HasKill = false;
        if (!HasUse)
          HasKill = anyKillInMBBAfterIdx(li, I->valno, MBB, index.getDefIndex());
        else {
          // If this is a two-address code, then this index starts a new VNInfo.
          const VNInfo *VNI = li.findDefinedVNInfoForRegInt(index.getDefIndex());
          if (VNI)
            HasKill = anyKillInMBBAfterIdx(li, VNI, MBB, index.getDefIndex());
        }
        DenseMap<unsigned, std::vector<SRInfo> >::iterator SII =
          SpillIdxes.find(MBBId);
        if (!HasKill) {
          // Value lives out past this def: a spill (store) is needed after it.
          if (SII == SpillIdxes.end()) {
            std::vector<SRInfo> S;
            S.push_back(SRInfo(index, NewVReg, true));
            SpillIdxes.insert(std::make_pair(MBBId, S));
          } else if (SII->second.back().vreg != NewVReg) {
            SII->second.push_back(SRInfo(index, NewVReg, true));
          } else if (index > SII->second.back().index) {
            // If there is an earlier def and this is a two-address
            // instruction, then it's not possible to fold the store (which
            // would also fold the load).
            SRInfo &Info = SII->second.back();
            Info.index = index;
            Info.canFold = !HasUse;
          }
          SpillMBBs.set(MBBId);
        } else if (SII != SpillIdxes.end() &&
                   SII->second.back().vreg == NewVReg &&
                   index > SII->second.back().index) {
          // There is an earlier def that's not killed (must be two-address).
          // The spill is no longer needed.
          SII->second.pop_back();
          if (SII->second.empty()) {
            SpillIdxes.erase(MBBId);
            SpillMBBs.reset(MBBId);
          }
        }
      }
    }

    if (HasUse) {
      DenseMap<unsigned, std::vector<SRInfo> >::iterator SII =
        SpillIdxes.find(MBBId);
      if (SII != SpillIdxes.end() &&
          SII->second.back().vreg == NewVReg &&
          index > SII->second.back().index)
        // Use(s) following the last def, it's not safe to fold the spill.
        SII->second.back().canFold = false;
      DenseMap<unsigned, std::vector<SRInfo> >::iterator RII =
        RestoreIdxes.find(MBBId);
      if (RII != RestoreIdxes.end() && RII->second.back().vreg == NewVReg)
        // If we are splitting live intervals, only fold if it's the first
        // use and there isn't another use later in the MBB.
        RII->second.back().canFold = false;
      else if (IsNew) {
        // Only need a reload if there isn't an earlier def / use.
        if (RII == RestoreIdxes.end()) {
          std::vector<SRInfo> Infos;
          Infos.push_back(SRInfo(index, NewVReg, true));
          RestoreIdxes.insert(std::make_pair(MBBId, Infos));
        } else {
          RII->second.push_back(SRInfo(index, NewVReg, true));
        }
        RestoreMBBs.set(MBBId);
      }
    }

    // Update spill weight.
    unsigned loopDepth = loopInfo->getLoopDepth(MBB);
    nI.weight += getSpillWeight(HasDef, HasUse, loopDepth);
  }

  if (NewVReg && TrySplit && AllCanFold) {
    // If all of its def / use can be folded, give it a low spill weight.
    LiveInterval &nI = getOrCreateInterval(NewVReg);
    nI.weight /= 10.0F;
  }
}
1473 
alsoFoldARestore(int Id,SlotIndex index,unsigned vr,BitVector & RestoreMBBs,DenseMap<unsigned,std::vector<SRInfo>> & RestoreIdxes)1474 bool LiveIntervals::alsoFoldARestore(int Id, SlotIndex index,
1475                         unsigned vr, BitVector &RestoreMBBs,
1476                         DenseMap<unsigned,std::vector<SRInfo> > &RestoreIdxes) {
1477   if (!RestoreMBBs[Id])
1478     return false;
1479   std::vector<SRInfo> &Restores = RestoreIdxes[Id];
1480   for (unsigned i = 0, e = Restores.size(); i != e; ++i)
1481     if (Restores[i].index == index &&
1482         Restores[i].vreg == vr &&
1483         Restores[i].canFold)
1484       return true;
1485   return false;
1486 }
1487 
eraseRestoreInfo(int Id,SlotIndex index,unsigned vr,BitVector & RestoreMBBs,DenseMap<unsigned,std::vector<SRInfo>> & RestoreIdxes)1488 void LiveIntervals::eraseRestoreInfo(int Id, SlotIndex index,
1489                         unsigned vr, BitVector &RestoreMBBs,
1490                         DenseMap<unsigned,std::vector<SRInfo> > &RestoreIdxes) {
1491   if (!RestoreMBBs[Id])
1492     return;
1493   std::vector<SRInfo> &Restores = RestoreIdxes[Id];
1494   for (unsigned i = 0, e = Restores.size(); i != e; ++i)
1495     if (Restores[i].index == index && Restores[i].vreg)
1496       Restores[i].index = SlotIndex();
1497 }
1498 
/// handleSpilledImpDefs - Remove IMPLICIT_DEF instructions which are being
/// spilled and create empty intervals for their uses.
///
/// li  - the interval being spilled; any remaining operands of li.reg are
///       either IMPLICIT_DEF defs or uses fed only by an implicit_def.
/// vrm - virtual register map, kept in sync as instructions are deleted and
///       new vregs are created.
/// rc  - register class used for the replacement virtual registers.
/// NewLIs - receives the (empty) intervals created for rewritten uses.
void
LiveIntervals::handleSpilledImpDefs(const LiveInterval &li, VirtRegMap &vrm,
                                    const TargetRegisterClass* rc,
                                    std::vector<LiveInterval*> &NewLIs) {
  // Walk all remaining operands of li.reg. The iterator is advanced before
  // the operand is modified or the instruction erased, since either action
  // invalidates the current position in the use list.
  for (MachineRegisterInfo::reg_iterator ri = mri_->reg_begin(li.reg),
         re = mri_->reg_end(); ri != re; ) {
    MachineOperand &O = ri.getOperand();
    MachineInstr *MI = &*ri;
    ++ri;
    if (MI->isDebugValue()) {
      // Remove debug info for now.
      O.setReg(0U);
      DEBUG(dbgs() << "Removing debug info due to spill:" << "\t" << *MI);
      continue;
    }
    if (O.isDef()) {
      // Any def of li.reg still present must be the IMPLICIT_DEF itself;
      // delete it and drop it from both the slot-index and rewrite maps.
      assert(MI->isImplicitDef() &&
             "Register def was not rewritten?");
      RemoveMachineInstrFromMaps(MI);
      vrm.RemoveMachineInstrFromMaps(MI);
      MI->eraseFromParent();
    } else {
      // This must be an use of an implicit_def so it's not part of the live
      // interval. Create a new empty live interval for it.
      // FIXME: Can we simply erase some of the instructions? e.g. Stores?
      unsigned NewVReg = mri_->createVirtualRegister(rc);
      vrm.grow();
      vrm.setIsImplicitlyDefined(NewVReg);
      NewLIs.push_back(&getOrCreateInterval(NewVReg));
      // Rewrite every operand of this instruction that referenced li.reg to
      // the fresh vreg and mark it undef, since no real value is defined.
      for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
        MachineOperand &MO = MI->getOperand(i);
        if (MO.isReg() && MO.getReg() == li.reg) {
          MO.setReg(NewVReg);
          MO.setIsUndef();
        }
      }
    }
  }
}
1540 
1541 float
getSpillWeight(bool isDef,bool isUse,unsigned loopDepth)1542 LiveIntervals::getSpillWeight(bool isDef, bool isUse, unsigned loopDepth) {
1543   // Limit the loop depth ridiculousness.
1544   if (loopDepth > 200)
1545     loopDepth = 200;
1546 
1547   // The loop depth is used to roughly estimate the number of times the
1548   // instruction is executed. Something like 10^d is simple, but will quickly
1549   // overflow a float. This expression behaves like 10^d for small d, but is
1550   // more tempered for large d. At d=200 we get 6.7e33 which leaves a bit of
1551   // headroom before overflow.
1552   float lc = std::pow(1 + (100.0f / (loopDepth+10)), (float)loopDepth);
1553 
1554   return (isDef + isUse) * lc;
1555 }
1556 
1557 void
normalizeSpillWeights(std::vector<LiveInterval * > & NewLIs)1558 LiveIntervals::normalizeSpillWeights(std::vector<LiveInterval*> &NewLIs) {
1559   for (unsigned i = 0, e = NewLIs.size(); i != e; ++i)
1560     normalizeSpillWeight(*NewLIs[i]);
1561 }
1562 
/// addIntervalsForSpills - Create new intervals for the spilled defs / uses
/// of the given interval, rewriting the instructions that reference li.reg.
/// Spill and restore points are recorded per-MBB, folded into memory
/// operands where possible, and the resulting non-empty intervals are
/// returned. SpillIs lists intervals concurrently being spilled (consulted
/// when deciding rematerializability).
std::vector<LiveInterval*> LiveIntervals::
addIntervalsForSpills(const LiveInterval &li,
                      SmallVectorImpl<LiveInterval*> &SpillIs,
                      const MachineLoopInfo *loopInfo, VirtRegMap &vrm) {
  assert(li.isSpillable() && "attempt to spill already spilled interval!");

  DEBUG({
      dbgs() << "\t\t\t\tadding intervals for spills for interval: ";
      li.print(dbgs(), tri_);
      dbgs() << '\n';
    });

  // Each bit specify whether a spill is required in the MBB.
  BitVector SpillMBBs(mf_->getNumBlockIDs());
  DenseMap<unsigned, std::vector<SRInfo> > SpillIdxes;
  BitVector RestoreMBBs(mf_->getNumBlockIDs());
  DenseMap<unsigned, std::vector<SRInfo> > RestoreIdxes;
  DenseMap<unsigned,unsigned> MBBVRegsMap;
  std::vector<LiveInterval*> NewLIs;
  const TargetRegisterClass* rc = mri_->getRegClass(li.reg);

  // Per-val# rematerialization state, indexed by VNInfo id.
  unsigned NumValNums = li.getNumValNums();
  SmallVector<MachineInstr*, 4> ReMatDefs;
  ReMatDefs.resize(NumValNums, NULL);
  SmallVector<MachineInstr*, 4> ReMatOrigDefs;
  ReMatOrigDefs.resize(NumValNums, NULL);
  SmallVector<int, 4> ReMatIds;
  ReMatIds.resize(NumValNums, VirtRegMap::MAX_STACK_SLOT);
  BitVector ReMatDelete(NumValNums);
  unsigned Slot = VirtRegMap::MAX_STACK_SLOT;

  // Spilling a split live interval. It cannot be split any further. Also,
  // it's also guaranteed to be a single val# / range interval.
  if (vrm.getPreSplitReg(li.reg)) {
    vrm.setIsSplitFromReg(li.reg, 0);
    // Unset the split kill marker on the last use.
    SlotIndex KillIdx = vrm.getKillPoint(li.reg);
    if (KillIdx != SlotIndex()) {
      MachineInstr *KillMI = getInstructionFromIndex(KillIdx);
      assert(KillMI && "Last use disappeared?");
      int KillOp = KillMI->findRegisterUseOperandIdx(li.reg, true);
      assert(KillOp != -1 && "Last use disappeared?");
      KillMI->getOperand(KillOp).setIsKill(false);
    }
    vrm.removeKillPoint(li.reg);
    bool DefIsReMat = vrm.isReMaterialized(li.reg);
    Slot = vrm.getStackSlot(li.reg);
    assert(Slot != VirtRegMap::MAX_STACK_SLOT);
    MachineInstr *ReMatDefMI = DefIsReMat ?
      vrm.getReMaterializedMI(li.reg) : NULL;
    int LdSlot = 0;
    bool isLoadSS = DefIsReMat && tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
    bool isLoad = isLoadSS ||
      (DefIsReMat && (ReMatDefMI->getDesc().canFoldAsLoad()));
    bool IsFirstRange = true;
    for (LiveInterval::Ranges::const_iterator
           I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
      // If this is a split live interval with multiple ranges, it means there
      // are two-address instructions that re-defined the value. Only the
      // first def can be rematerialized!
      if (IsFirstRange) {
        // Note ReMatOrigDefMI has already been deleted.
        rewriteInstructionsForSpills(li, false, I, NULL, ReMatDefMI,
                             Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
                             false, vrm, rc, ReMatIds, loopInfo,
                             SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes,
                             MBBVRegsMap, NewLIs);
      } else {
        rewriteInstructionsForSpills(li, false, I, NULL, 0,
                             Slot, 0, false, false, false,
                             false, vrm, rc, ReMatIds, loopInfo,
                             SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes,
                             MBBVRegsMap, NewLIs);
      }
      IsFirstRange = false;
    }

    handleSpilledImpDefs(li, vrm, rc, NewLIs);
    normalizeSpillWeights(NewLIs);
    return NewLIs;
  }

  // Only attempt splitting when the interval spans more than one block.
  bool TrySplit = !intervalIsInOneMBB(li);
  if (TrySplit)
    ++numSplits;
  bool NeedStackSlot = false;
  // Decide, per val#, whether its def can be rematerialized instead of
  // reloaded from a stack slot, and whether the original def is deletable.
  for (LiveInterval::const_vni_iterator i = li.vni_begin(), e = li.vni_end();
       i != e; ++i) {
    const VNInfo *VNI = *i;
    unsigned VN = VNI->id;
    if (VNI->isUnused())
      continue; // Dead val#.
    // Is the def for the val# rematerializable?
    MachineInstr *ReMatDefMI = VNI->isDefAccurate()
      ? getInstructionFromIndex(VNI->def) : 0;
    bool dummy;
    if (ReMatDefMI && isReMaterializable(li, VNI, ReMatDefMI, SpillIs, dummy)) {
      // Remember how to remat the def of this val#.
      ReMatOrigDefs[VN] = ReMatDefMI;
      // Original def may be modified so we have to make a copy here.
      MachineInstr *Clone = mf_->CloneMachineInstr(ReMatDefMI);
      CloneMIs.push_back(Clone);
      ReMatDefs[VN] = Clone;

      bool CanDelete = true;
      if (VNI->hasPHIKill()) {
        // A kill is a phi node, not all of its uses can be rematerialized.
        // It must not be deleted.
        CanDelete = false;
        // Need a stack slot if there is any live range where uses cannot be
        // rematerialized.
        NeedStackSlot = true;
      }
      if (CanDelete)
        ReMatDelete.set(VN);
    } else {
      // Need a stack slot if there is any live range where uses cannot be
      // rematerialized.
      NeedStackSlot = true;
    }
  }

  // One stack slot per live interval.
  if (NeedStackSlot && vrm.getPreSplitReg(li.reg) == 0) {
    if (vrm.getStackSlot(li.reg) == VirtRegMap::NO_STACK_SLOT)
      Slot = vrm.assignVirt2StackSlot(li.reg);

    // This case only occurs when the prealloc splitter has already assigned
    // a stack slot to this vreg.
    else
      Slot = vrm.getStackSlot(li.reg);
  }

  // Create new intervals and rewrite defs and uses.
  for (LiveInterval::Ranges::const_iterator
         I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
    MachineInstr *ReMatDefMI = ReMatDefs[I->valno->id];
    MachineInstr *ReMatOrigDefMI = ReMatOrigDefs[I->valno->id];
    bool DefIsReMat = ReMatDefMI != NULL;
    bool CanDelete = ReMatDelete[I->valno->id];
    int LdSlot = 0;
    bool isLoadSS = DefIsReMat && tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
    bool isLoad = isLoadSS ||
      (DefIsReMat && ReMatDefMI->getDesc().canFoldAsLoad());
    rewriteInstructionsForSpills(li, TrySplit, I, ReMatOrigDefMI, ReMatDefMI,
                               Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
                               CanDelete, vrm, rc, ReMatIds, loopInfo,
                               SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes,
                               MBBVRegsMap, NewLIs);
  }

  // Insert spills / restores if we are splitting.
  if (!TrySplit) {
    handleSpilledImpDefs(li, vrm, rc, NewLIs);
    normalizeSpillWeights(NewLIs);
    return NewLIs;
  }

  SmallPtrSet<LiveInterval*, 4> AddedKill;
  SmallVector<unsigned, 2> Ops;
  if (NeedStackSlot) {
    // Process each block with a recorded spill point: try to fold the store
    // into the defining instruction, otherwise queue an explicit spill.
    int Id = SpillMBBs.find_first();
    while (Id != -1) {
      std::vector<SRInfo> &spills = SpillIdxes[Id];
      for (unsigned i = 0, e = spills.size(); i != e; ++i) {
        SlotIndex index = spills[i].index;
        unsigned VReg = spills[i].vreg;
        LiveInterval &nI = getOrCreateInterval(VReg);
        bool isReMat = vrm.isReMaterialized(VReg);
        MachineInstr *MI = getInstructionFromIndex(index);
        bool CanFold = false;
        bool FoundUse = false;
        Ops.clear();
        if (spills[i].canFold) {
          CanFold = true;
          // Collect all operand indices of VReg on this instruction and
          // verify folding is still legal given any uses present.
          for (unsigned j = 0, ee = MI->getNumOperands(); j != ee; ++j) {
            MachineOperand &MO = MI->getOperand(j);
            if (!MO.isReg() || MO.getReg() != VReg)
              continue;

            Ops.push_back(j);
            if (MO.isDef())
              continue;
            if (isReMat ||
                (!FoundUse && !alsoFoldARestore(Id, index, VReg,
                                                RestoreMBBs, RestoreIdxes))) {
              // MI has two-address uses of the same register. If the use
              // isn't the first and only use in the BB, then we can't fold
              // it. FIXME: Move this to rewriteInstructionsForSpills.
              CanFold = false;
              break;
            }
            FoundUse = true;
          }
        }
        // Fold the store into the def if possible.
        bool Folded = false;
        if (CanFold && !Ops.empty()) {
          if (tryFoldMemoryOperand(MI, vrm, NULL, index, Ops, true, Slot,VReg)){
            Folded = true;
            if (FoundUse) {
              // Also folded uses, do not issue a load.
              eraseRestoreInfo(Id, index, VReg, RestoreMBBs, RestoreIdxes);
              nI.removeRange(index.getLoadIndex(), index.getDefIndex());
            }
            nI.removeRange(index.getDefIndex(), index.getStoreIndex());
          }
        }

        // Otherwise tell the spiller to issue a spill.
        if (!Folded) {
          LiveRange *LR = &nI.ranges[nI.ranges.size()-1];
          bool isKill = LR->end == index.getStoreIndex();
          if (!MI->registerDefIsDead(nI.reg))
            // No need to spill a dead def.
            vrm.addSpillPoint(VReg, isKill, MI);
          if (isKill)
            AddedKill.insert(&nI);
        }
      }
      Id = SpillMBBs.find_next(Id);
    }
  }

  // Process each block with recorded restore points: try to fold the load
  // (or the rematerialized def) into the using instruction, otherwise queue
  // an explicit reload / remat.
  int Id = RestoreMBBs.find_first();
  while (Id != -1) {
    std::vector<SRInfo> &restores = RestoreIdxes[Id];
    for (unsigned i = 0, e = restores.size(); i != e; ++i) {
      SlotIndex index = restores[i].index;
      // Entries invalidated by eraseRestoreInfo carry a null index.
      if (index == SlotIndex())
        continue;
      unsigned VReg = restores[i].vreg;
      LiveInterval &nI = getOrCreateInterval(VReg);
      bool isReMat = vrm.isReMaterialized(VReg);
      MachineInstr *MI = getInstructionFromIndex(index);
      bool CanFold = false;
      Ops.clear();
      if (restores[i].canFold) {
        CanFold = true;
        for (unsigned j = 0, ee = MI->getNumOperands(); j != ee; ++j) {
          MachineOperand &MO = MI->getOperand(j);
          if (!MO.isReg() || MO.getReg() != VReg)
            continue;

          if (MO.isDef()) {
            // If this restore were to be folded, it would have been folded
            // already.
            CanFold = false;
            break;
          }
          Ops.push_back(j);
        }
      }

      // Fold the load into the use if possible.
      bool Folded = false;
      if (CanFold && !Ops.empty()) {
        if (!isReMat)
          Folded = tryFoldMemoryOperand(MI, vrm, NULL,index,Ops,true,Slot,VReg);
        else {
          MachineInstr *ReMatDefMI = vrm.getReMaterializedMI(VReg);
          int LdSlot = 0;
          bool isLoadSS = tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
          // If the rematerializable def is a load, also try to fold it.
          if (isLoadSS || ReMatDefMI->getDesc().canFoldAsLoad())
            Folded = tryFoldMemoryOperand(MI, vrm, ReMatDefMI, index,
                                          Ops, isLoadSS, LdSlot, VReg);
          if (!Folded) {
            unsigned ImpUse = getReMatImplicitUse(li, ReMatDefMI);
            if (ImpUse) {
              // Re-matting an instruction with virtual register use. Add the
              // register as an implicit use on the use MI and mark the register
              // interval as unspillable.
              LiveInterval &ImpLi = getInterval(ImpUse);
              ImpLi.markNotSpillable();
              MI->addOperand(MachineOperand::CreateReg(ImpUse, false, true));
            }
          }
        }
      }
      // If folding is not possible / failed, then tell the spiller to issue a
      // load / rematerialization for us.
      if (Folded)
        nI.removeRange(index.getLoadIndex(), index.getDefIndex());
      else
        vrm.addRestorePoint(VReg, MI);
    }
    Id = RestoreMBBs.find_next(Id);
  }

  // Finalize intervals: add kills, finalize spill weights, and filter out
  // dead intervals.
  std::vector<LiveInterval*> RetNewLIs;
  for (unsigned i = 0, e = NewLIs.size(); i != e; ++i) {
    LiveInterval *LI = NewLIs[i];
    if (!LI->empty()) {
      if (!AddedKill.count(LI)) {
        // Mark the last use of this interval as a kill, unless it is tied
        // to a def operand (two-address).
        LiveRange *LR = &LI->ranges[LI->ranges.size()-1];
        SlotIndex LastUseIdx = LR->end.getBaseIndex();
        MachineInstr *LastUse = getInstructionFromIndex(LastUseIdx);
        int UseIdx = LastUse->findRegisterUseOperandIdx(LI->reg, false);
        assert(UseIdx != -1);
        if (!LastUse->isRegTiedToDefOperand(UseIdx)) {
          LastUse->getOperand(UseIdx).setIsKill();
          vrm.addKillPoint(LI->reg, LastUseIdx);
        }
      }
      RetNewLIs.push_back(LI);
    }
  }

  handleSpilledImpDefs(li, vrm, rc, RetNewLIs);
  normalizeSpillWeights(RetNewLIs);
  return RetNewLIs;
}
1878 
1879 /// hasAllocatableSuperReg - Return true if the specified physical register has
1880 /// any super register that's allocatable.
hasAllocatableSuperReg(unsigned Reg) const1881 bool LiveIntervals::hasAllocatableSuperReg(unsigned Reg) const {
1882   for (const unsigned* AS = tri_->getSuperRegisters(Reg); *AS; ++AS)
1883     if (allocatableRegs_[*AS] && hasInterval(*AS))
1884       return true;
1885   return false;
1886 }
1887 
1888 /// getRepresentativeReg - Find the largest super register of the specified
1889 /// physical register.
getRepresentativeReg(unsigned Reg) const1890 unsigned LiveIntervals::getRepresentativeReg(unsigned Reg) const {
1891   // Find the largest super-register that is allocatable.
1892   unsigned BestReg = Reg;
1893   for (const unsigned* AS = tri_->getSuperRegisters(Reg); *AS; ++AS) {
1894     unsigned SuperReg = *AS;
1895     if (!hasAllocatableSuperReg(SuperReg) && hasInterval(SuperReg)) {
1896       BestReg = SuperReg;
1897       break;
1898     }
1899   }
1900   return BestReg;
1901 }
1902 
1903 /// getNumConflictsWithPhysReg - Return the number of uses and defs of the
1904 /// specified interval that conflicts with the specified physical register.
getNumConflictsWithPhysReg(const LiveInterval & li,unsigned PhysReg) const1905 unsigned LiveIntervals::getNumConflictsWithPhysReg(const LiveInterval &li,
1906                                                    unsigned PhysReg) const {
1907   unsigned NumConflicts = 0;
1908   const LiveInterval &pli = getInterval(getRepresentativeReg(PhysReg));
1909   for (MachineRegisterInfo::reg_iterator I = mri_->reg_begin(li.reg),
1910          E = mri_->reg_end(); I != E; ++I) {
1911     MachineOperand &O = I.getOperand();
1912     MachineInstr *MI = O.getParent();
1913     if (MI->isDebugValue())
1914       continue;
1915     SlotIndex Index = getInstructionIndex(MI);
1916     if (pli.liveAt(Index))
1917       ++NumConflicts;
1918   }
1919   return NumConflicts;
1920 }
1921 
/// spillPhysRegAroundRegDefsUses - Spill the specified physical register
/// around all defs and uses of the specified interval. Return true if it
/// was able to cut its interval.
bool LiveIntervals::spillPhysRegAroundRegDefsUses(const LiveInterval &li,
                                            unsigned PhysReg, VirtRegMap &vrm) {
  unsigned SpillReg = getRepresentativeReg(PhysReg);

  for (const unsigned *AS = tri_->getAliasSet(PhysReg); *AS; ++AS)
    // If there are registers which alias PhysReg, but which are not a
    // sub-register of the chosen representative super register. Assert
    // since we can't handle it yet.
    assert(*AS == SpillReg || !allocatableRegs_[*AS] || !hasInterval(*AS) ||
           tri_->isSuperRegister(*AS, SpillReg));

  bool Cut = false;
  // Collect the physical registers whose intervals must be carved: SpillReg
  // itself when it has an interval, otherwise its sub-registers that do.
  SmallVector<unsigned, 4> PRegs;
  if (hasInterval(SpillReg))
    PRegs.push_back(SpillReg);
  else {
    SmallSet<unsigned, 4> Added;
    for (const unsigned* AS = tri_->getSubRegisters(SpillReg); *AS; ++AS)
      if (Added.insert(*AS) && hasInterval(*AS)) {
        PRegs.push_back(*AS);
        // Once a sub-register is taken, its own sub-registers are marked
        // seen so they are not added separately.
        for (const unsigned* ASS = tri_->getSubRegisters(*AS); *ASS; ++ASS)
          Added.insert(*ASS);
      }
  }

  // Visit each instruction that references li.reg exactly once and remove
  // the physical register ranges covering it.
  SmallPtrSet<MachineInstr*, 8> SeenMIs;
  for (MachineRegisterInfo::reg_iterator I = mri_->reg_begin(li.reg),
         E = mri_->reg_end(); I != E; ++I) {
    MachineOperand &O = I.getOperand();
    MachineInstr *MI = O.getParent();
    if (MI->isDebugValue() || SeenMIs.count(MI))
      continue;
    SeenMIs.insert(MI);
    SlotIndex Index = getInstructionIndex(MI);
    for (unsigned i = 0, e = PRegs.size(); i != e; ++i) {
      unsigned PReg = PRegs[i];
      LiveInterval &pli = getInterval(PReg);
      if (!pli.liveAt(Index))
        continue;
      vrm.addEmergencySpill(PReg, MI);
      // Carve the slot range [load, next base) around this instruction out
      // of the physical register's interval.
      SlotIndex StartIdx = Index.getLoadIndex();
      SlotIndex EndIdx = Index.getNextIndex().getBaseIndex();
      if (pli.isInOneLiveRange(StartIdx, EndIdx)) {
        pli.removeRange(StartIdx, EndIdx);
        Cut = true;
      } else {
        // The range straddles multiple live ranges; the interval cannot be
        // cut here and allocation has failed.
        std::string msg;
        raw_string_ostream Msg(msg);
        Msg << "Ran out of registers during register allocation!";
        if (MI->isInlineAsm()) {
          Msg << "\nPlease check your inline asm statement for invalid "
              << "constraints:\n";
          MI->print(Msg, tm_);
        }
        report_fatal_error(Msg.str());
      }
      // Also trim any sub-register intervals live at this instruction.
      for (const unsigned* AS = tri_->getSubRegisters(PReg); *AS; ++AS) {
        if (!hasInterval(*AS))
          continue;
        LiveInterval &spli = getInterval(*AS);
        if (spli.liveAt(Index))
          spli.removeRange(Index.getLoadIndex(),
                           Index.getNextIndex().getBaseIndex());
      }
    }
  }
  return Cut;
}
1993 
addLiveRangeToEndOfBlock(unsigned reg,MachineInstr * startInst)1994 LiveRange LiveIntervals::addLiveRangeToEndOfBlock(unsigned reg,
1995                                                   MachineInstr* startInst) {
1996   LiveInterval& Interval = getOrCreateInterval(reg);
1997   VNInfo* VN = Interval.getNextValue(
1998     SlotIndex(getInstructionIndex(startInst).getDefIndex()),
1999     startInst, true, getVNInfoAllocator());
2000   VN->setHasPHIKill(true);
2001   LiveRange LR(
2002      SlotIndex(getInstructionIndex(startInst).getDefIndex()),
2003      getMBBEndIdx(startInst->getParent()), VN);
2004   Interval.addRange(LR);
2005 
2006   return LR;
2007 }
2008 
2009