/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
 * vim: set ts=8 sts=2 et sw=2 tw=80:
 * This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this
 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */

#include "jit/BacktrackingAllocator.h"

#include <algorithm>

#include "jit/BitSet.h"
#include "jit/CompileInfo.h"
#include "js/Printf.h"

using namespace js;
using namespace js::jit;

using mozilla::DebugOnly;

/////////////////////////////////////////////////////////////////////
// Utility
/////////////////////////////////////////////////////////////////////

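// Ordering predicates for the sorted lists maintained by InsertSortedList
// below: use lists, bundle range lists and register range lists are all kept
// sorted by code position.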
static inline bool SortBefore(UsePosition* a, UsePosition* b) {
  return a->pos <= b->pos;
}

static inline bool SortBefore(LiveRange::BundleLink* a,
                              LiveRange::BundleLink* b) {
  LiveRange* rangea = LiveRange::get(a);
  LiveRange* rangeb = LiveRange::get(b);
  MOZ_ASSERT(!rangea->intersects(rangeb));
  return rangea->from() < rangeb->from();
}

static inline bool SortBefore(LiveRange::RegisterLink* a,
                              LiveRange::RegisterLink* b) {
  return LiveRange::get(a)->from() <= LiveRange::get(b)->from();
}

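// Insert |value| into |list| at a position consistent with SortBefore. The
// list is scanned linearly, so insertion is O(n) in the length of the list.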
template <typename T>
static inline void InsertSortedList(InlineForwardList<T>& list, T* value) {
  if (list.empty()) {
    list.pushFront(value);
    return;
  }

  if (SortBefore(list.back(), value)) {
    list.pushBack(value);
    return;
  }

  T* prev = nullptr;
  for (InlineForwardListIterator<T> iter = list.begin(); iter; iter++) {
    if (SortBefore(value, *iter)) {
      break;
    }
    prev = *iter;
  }

  if (prev) {
    list.insertAfter(prev, value);
  } else {
    list.pushFront(value);
  }
}

static bool CanMergeTypesInBundle(LDefinition::Type a, LDefinition::Type b) {
  // Fast path for the common case.
  if (a == b) {
    return true;
  }

  // Only merge if the sizes match, so that we don't get confused about the
  // width of spill slots.
  return StackSlotAllocator::width(a) == StackSlotAllocator::width(b);
}

/////////////////////////////////////////////////////////////////////
// LiveRange
/////////////////////////////////////////////////////////////////////

inline void LiveRange::noteAddedUse(UsePosition* use) {
  LUse::Policy policy = use->usePolicy();
  usesSpillWeight_ += BacktrackingAllocator::SpillWeightFromUsePolicy(policy);
  if (policy == LUse::FIXED) {
    ++numFixedUses_;
  }
}

inline void LiveRange::noteRemovedUse(UsePosition* use) {
  LUse::Policy policy = use->usePolicy();
  usesSpillWeight_ -= BacktrackingAllocator::SpillWeightFromUsePolicy(policy);
  if (policy == LUse::FIXED) {
    --numFixedUses_;
  }
  MOZ_ASSERT_IF(!hasUses(), !usesSpillWeight_ && !numFixedUses_);
}

void LiveRange::addUse(UsePosition* use) {
  MOZ_ASSERT(covers(use->pos));
  InsertSortedList(uses_, use);
  noteAddedUse(use);
}

UsePosition* LiveRange::popUse() {
  UsePosition* ret = uses_.popFront();
  noteRemovedUse(ret);
  return ret;
}

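// Move this range's uses into |other| wherever |other| covers them; the
// definition is transferred as well if both ranges start at the same position.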
void LiveRange::distributeUses(LiveRange* other) {
  MOZ_ASSERT(other->vreg() == vreg());
  MOZ_ASSERT(this != other);

  // Move over all uses which fit in |other|'s boundaries.
  for (UsePositionIterator iter = usesBegin(); iter;) {
    UsePosition* use = *iter;
    if (other->covers(use->pos)) {
      uses_.removeAndIncrement(iter);
      noteRemovedUse(use);
      other->addUse(use);
    } else {
      iter++;
    }
  }

  // Distribute the definition to |other| as well, if possible.
  if (hasDefinition() && from() == other->from()) {
    other->setHasDefinition();
  }
}

bool LiveRange::contains(LiveRange* other) const {
  return from() <= other->from() && to() >= other->to();
}

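// Split this range against |other|: |pre| and |post| receive the parts of this
// range lying before and after |other|, and |inside| the overlapping part, if
// any. Each output is left empty if the corresponding part does not exist.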
void LiveRange::intersect(LiveRange* other, Range* pre, Range* inside,
                          Range* post) const {
  MOZ_ASSERT(pre->empty() && inside->empty() && post->empty());

  CodePosition innerFrom = from();
  if (from() < other->from()) {
    if (to() < other->from()) {
      *pre = range_;
      return;
    }
    *pre = Range(from(), other->from());
    innerFrom = other->from();
  }

  CodePosition innerTo = to();
  if (to() > other->to()) {
    if (from() >= other->to()) {
      *post = range_;
      return;
    }
    *post = Range(other->to(), to());
    innerTo = other->to();
  }

  if (innerFrom != innerTo) {
    *inside = Range(innerFrom, innerTo);
  }
}

bool LiveRange::intersects(LiveRange* other) const {
  Range pre, inside, post;
  intersect(other, &pre, &inside, &post);
  return !inside.empty();
}

/////////////////////////////////////////////////////////////////////
// SpillSet
/////////////////////////////////////////////////////////////////////

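// Propagate a spill allocation to every bundle sharing this spill set.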
void SpillSet::setAllocation(LAllocation alloc) {
  for (size_t i = 0; i < numSpilledBundles(); i++) {
    spilledBundle(i)->setAllocation(alloc);
  }
}

/////////////////////////////////////////////////////////////////////
// LiveBundle
/////////////////////////////////////////////////////////////////////

#ifdef DEBUG
size_t LiveBundle::numRanges() const {
  size_t count = 0;
  for (LiveRange::BundleLinkIterator iter = rangesBegin(); iter; iter++) {
    count++;
  }
  return count;
}
#endif  // DEBUG

LiveRange* LiveBundle::rangeFor(CodePosition pos) const {
  for (LiveRange::BundleLinkIterator iter = rangesBegin(); iter; iter++) {
    LiveRange* range = LiveRange::get(*iter);
    if (range->covers(pos)) {
      return range;
    }
  }
  return nullptr;
}

void LiveBundle::addRange(LiveRange* range) {
  MOZ_ASSERT(!range->bundle());
  range->setBundle(this);
  InsertSortedList(ranges_, &range->bundleLink);
}

bool LiveBundle::addRange(TempAllocator& alloc, uint32_t vreg,
                          CodePosition from, CodePosition to) {
  LiveRange* range = LiveRange::FallibleNew(alloc, vreg, from, to);
  if (!range) {
    return false;
  }
  addRange(range);
  return true;
}

bool LiveBundle::addRangeAndDistributeUses(TempAllocator& alloc,
                                           LiveRange* oldRange,
                                           CodePosition from, CodePosition to) {
  LiveRange* range = LiveRange::FallibleNew(alloc, oldRange->vreg(), from, to);
  if (!range) {
    return false;
  }
  addRange(range);
  oldRange->distributeUses(range);
  return true;
}

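// Detach and return the first range in this bundle, clearing its bundle
// pointer, or return nullptr if the bundle has no ranges.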
LiveRange* LiveBundle::popFirstRange() {
  LiveRange::BundleLinkIterator iter = rangesBegin();
  if (!iter) {
    return nullptr;
  }

  LiveRange* range = LiveRange::get(*iter);
  ranges_.removeAt(iter);

  range->setBundle(nullptr);
  return range;
}

void LiveBundle::removeRange(LiveRange* range) {
  for (LiveRange::BundleLinkIterator iter = rangesBegin(); iter; iter++) {
    LiveRange* existing = LiveRange::get(*iter);
    if (existing == range) {
      ranges_.removeAt(iter);
      return;
    }
  }
  MOZ_CRASH();
}

/////////////////////////////////////////////////////////////////////
// VirtualRegister
/////////////////////////////////////////////////////////////////////

bool VirtualRegister::addInitialRange(TempAllocator& alloc, CodePosition from,
                                      CodePosition to, size_t* numRanges) {
  MOZ_ASSERT(from < to);

  // Mark [from,to) as a live range for this register during the initial
  // liveness analysis, coalescing with any existing overlapping ranges.

  // On some pathological graphs there might be a huge number of different
  // live ranges. Allow non-overlapping live ranges to be merged if the
  // number of ranges exceeds the cap below.
  static const size_t CoalesceLimit = 100000;

  LiveRange* prev = nullptr;
  LiveRange* merged = nullptr;
  for (LiveRange::RegisterLinkIterator iter(rangesBegin()); iter;) {
    LiveRange* existing = LiveRange::get(*iter);

    if (from > existing->to() && *numRanges < CoalesceLimit) {
      // The new range should go after this one.
      prev = existing;
      iter++;
      continue;
    }

    if (to.next() < existing->from()) {
      // The new range should go before this one.
      break;
    }

    if (!merged) {
      // This is the first old range we've found that overlaps the new
      // range. Extend this one to cover its union with the new range.
      merged = existing;

      if (from < existing->from()) {
        existing->setFrom(from);
      }
      if (to > existing->to()) {
        existing->setTo(to);
      }

      // Continue searching to see if any other old ranges can be
      // coalesced with the new merged range.
      iter++;
      continue;
    }

    // Coalesce this range into the previous range we merged into.
    MOZ_ASSERT(existing->from() >= merged->from());
    if (existing->to() > merged->to()) {
      merged->setTo(existing->to());
    }

    MOZ_ASSERT(!existing->hasDefinition());
    existing->distributeUses(merged);
    MOZ_ASSERT(!existing->hasUses());

    ranges_.removeAndIncrement(iter);
  }

  if (!merged) {
    // The new range does not overlap any existing range for the vreg.
    LiveRange* range = LiveRange::FallibleNew(alloc, vreg(), from, to);
    if (!range) {
      return false;
    }

    if (prev) {
      ranges_.insertAfter(&prev->registerLink, &range->registerLink);
    } else {
      ranges_.pushFront(&range->registerLink);
    }

    (*numRanges)++;
  }

  return true;
}

void VirtualRegister::addInitialUse(UsePosition* use) {
  LiveRange::get(*rangesBegin())->addUse(use);
}

void VirtualRegister::setInitialDefinition(CodePosition from) {
  LiveRange* first = LiveRange::get(*rangesBegin());
  MOZ_ASSERT(from >= first->from());
  first->setFrom(from);
  first->setHasDefinition();
}

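// Return a range of this register covering |pos|. If |preferRegister| is set,
// a covering range whose bundle was allocated a register is preferred;
// otherwise the first covering range found is returned.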
LiveRange* VirtualRegister::rangeFor(CodePosition pos,
                                     bool preferRegister /* = false */) const {
  LiveRange* found = nullptr;
  for (LiveRange::RegisterLinkIterator iter = rangesBegin(); iter; iter++) {
    LiveRange* range = LiveRange::get(*iter);
    if (range->covers(pos)) {
      if (!preferRegister || range->bundle()->allocation().isRegister()) {
        return range;
      }
      if (!found) {
        found = range;
      }
    }
  }
  return found;
}

void VirtualRegister::addRange(LiveRange* range) {
  InsertSortedList(ranges_, &range->registerLink);
}

void VirtualRegister::removeRange(LiveRange* range) {
  for (LiveRange::RegisterLinkIterator iter = rangesBegin(); iter; iter++) {
    LiveRange* existing = LiveRange::get(*iter);
    if (existing == range) {
      ranges_.removeAt(iter);
      return;
    }
  }
  MOZ_CRASH();
}

/////////////////////////////////////////////////////////////////////
// BacktrackingAllocator
/////////////////////////////////////////////////////////////////////

// This function pre-allocates and initializes as much global state as possible
// to avoid littering the algorithms with memory management cruft.
bool BacktrackingAllocator::init() {
  if (!RegisterAllocator::init()) {
    return false;
  }

  liveIn = mir->allocate<BitSet>(graph.numBlockIds());
  if (!liveIn) {
    return false;
  }

  size_t numVregs = graph.numVirtualRegisters();
  if (!vregs.init(mir->alloc(), numVregs)) {
    return false;
  }
  for (uint32_t i = 0; i < numVregs; i++) {
    new (&vregs[i]) VirtualRegister();
  }

  // Build virtual register objects.
  for (size_t i = 0; i < graph.numBlocks(); i++) {
    if (mir->shouldCancel("Create data structures (main loop)")) {
      return false;
    }

    LBlock* block = graph.getBlock(i);
    for (LInstructionIterator ins = block->begin(); ins != block->end();
         ins++) {
      if (mir->shouldCancel("Create data structures (inner loop 1)")) {
        return false;
      }

      for (size_t j = 0; j < ins->numDefs(); j++) {
        LDefinition* def = ins->getDef(j);
        if (def->isBogusTemp()) {
          continue;
        }
        vreg(def).init(*ins, def, /* isTemp = */ false);
      }

      for (size_t j = 0; j < ins->numTemps(); j++) {
        LDefinition* def = ins->getTemp(j);
        if (def->isBogusTemp()) {
          continue;
        }
        vreg(def).init(*ins, def, /* isTemp = */ true);
      }
    }
    for (size_t j = 0; j < block->numPhis(); j++) {
      LPhi* phi = block->getPhi(j);
      LDefinition* def = phi->getDef(0);
      vreg(def).init(phi, def, /* isTemp = */ false);
    }
  }

  LiveRegisterSet remainingRegisters(allRegisters_.asLiveSet());
  while (!remainingRegisters.emptyGeneral()) {
    AnyRegister reg = AnyRegister(remainingRegisters.takeAnyGeneral());
    registers[reg.code()].allocatable = true;
  }
  while (!remainingRegisters.emptyFloat()) {
    AnyRegister reg =
        AnyRegister(remainingRegisters.takeAnyFloat<RegTypeName::Any>());
    registers[reg.code()].allocatable = true;
  }

  LifoAlloc* lifoAlloc = mir->alloc().lifoAlloc();
  for (size_t i = 0; i < AnyRegister::Total; i++) {
    registers[i].reg = AnyRegister::FromCode(i);
    registers[i].allocations.setAllocator(lifoAlloc);
  }

  hotcode.setAllocator(lifoAlloc);
  callRanges.setAllocator(lifoAlloc);

  // Partition the graph into hot and cold sections, for helping to make
  // splitting decisions. Since we don't have any profiling data this is a
  // crapshoot, so just mark the bodies of inner loops as hot and everything
  // else as cold.

  LBlock* backedge = nullptr;
  for (size_t i = 0; i < graph.numBlocks(); i++) {
    LBlock* block = graph.getBlock(i);

    // If we see a loop header, mark the backedge so we know when we have
    // hit the end of the loop. Don't process the loop immediately, so that
    // if there is an inner loop we will ignore the outer backedge.
    if (block->mir()->isLoopHeader()) {
      backedge = block->mir()->backedge()->lir();
    }

    if (block == backedge) {
      LBlock* header = block->mir()->loopHeaderOfBackedge()->lir();
      LiveRange* range = LiveRange::FallibleNew(alloc(), 0, entryOf(header),
                                                exitOf(block).next());
      if (!range || !hotcode.insert(range)) {
        return false;
      }
    }
  }

  return true;
}

bool BacktrackingAllocator::addInitialFixedRange(AnyRegister reg,
                                                 CodePosition from,
                                                 CodePosition to) {
  LiveRange* range = LiveRange::FallibleNew(alloc(), 0, from, to);
  return range && registers[reg.code()].allocations.insert(range);
}

#ifdef DEBUG
// Returns true iff ins has a def/temp reusing the input allocation.
static bool IsInputReused(LInstruction* ins, LUse* use) {
  for (size_t i = 0; i < ins->numDefs(); i++) {
    if (ins->getDef(i)->policy() == LDefinition::MUST_REUSE_INPUT &&
        ins->getOperand(ins->getDef(i)->getReusedInput())->toUse() == use) {
      return true;
    }
  }

  for (size_t i = 0; i < ins->numTemps(); i++) {
    if (ins->getTemp(i)->policy() == LDefinition::MUST_REUSE_INPUT &&
        ins->getOperand(ins->getTemp(i)->getReusedInput())->toUse() == use) {
      return true;
    }
  }

  return false;
}
#endif

/*
 * This function builds up liveness ranges for all virtual registers
 * defined in the function.
 *
 * The algorithm is based on the one published in:
 *
 * Wimmer, Christian, and Michael Franz. "Linear Scan Register Allocation on
 *     SSA Form." Proceedings of the International Symposium on Code Generation
 *     and Optimization. Toronto, Ontario, Canada, ACM. 2010. 170-79. PDF.
 *
 * The algorithm operates on blocks ordered such that dominators of a block
 * are before the block itself, and such that all blocks of a loop are
 * contiguous. It proceeds backwards over the instructions in this order,
 * marking registers live at their uses, ending their live ranges at
 * definitions, and recording which registers are live at the top of every
 * block. To deal with loop backedges, registers live at the beginning of
 * a loop gain a range covering the entire loop.
 */
bool BacktrackingAllocator::buildLivenessInfo() {
  JitSpewCont(JitSpew_RegAlloc, "\n");
  JitSpew(JitSpew_RegAlloc, "Beginning liveness analysis");

  Vector<MBasicBlock*, 1, SystemAllocPolicy> loopWorkList;
  BitSet loopDone(graph.numBlockIds());
  if (!loopDone.init(alloc())) {
    return false;
  }

  size_t numRanges = 0;

  for (size_t i = graph.numBlocks(); i > 0; i--) {
    if (mir->shouldCancel("Build Liveness Info (main loop)")) {
      return false;
    }

    LBlock* block = graph.getBlock(i - 1);
    MBasicBlock* mblock = block->mir();

    BitSet& live = liveIn[mblock->id()];
    new (&live) BitSet(graph.numVirtualRegisters());
    if (!live.init(alloc())) {
      return false;
    }

    // Propagate liveIn from our successors to us.
    for (size_t i = 0; i < mblock->lastIns()->numSuccessors(); i++) {
      MBasicBlock* successor = mblock->lastIns()->getSuccessor(i);
      // Skip backedges, as we fix them up at the loop header.
      if (mblock->id() < successor->id()) {
        live.insertAll(liveIn[successor->id()]);
      }
    }

    // Add successor phis.
    if (mblock->successorWithPhis()) {
      LBlock* phiSuccessor = mblock->successorWithPhis()->lir();
      for (unsigned int j = 0; j < phiSuccessor->numPhis(); j++) {
        LPhi* phi = phiSuccessor->getPhi(j);
        LAllocation* use = phi->getOperand(mblock->positionInPhiSuccessor());
        uint32_t reg = use->toUse()->virtualRegister();
        live.insert(reg);
        vreg(use).setUsedByPhi();
      }
    }

    // Registers are assumed alive for the entire block; a definition shortens
    // the range to the point of definition.
    for (BitSet::Iterator liveRegId(live); liveRegId; ++liveRegId) {
      if (!vregs[*liveRegId].addInitialRange(alloc(), entryOf(block),
                                             exitOf(block).next(), &numRanges))
        return false;
    }

    // Shorten the front end of ranges for live variables to their point of
    // definition, if found.
    for (LInstructionReverseIterator ins = block->rbegin();
         ins != block->rend(); ins++) {
      // Calls may clobber registers, so force a spill and reload around the
      // callsite.
      if (ins->isCall()) {
        for (AnyRegisterIterator iter(allRegisters_.asLiveSet()); iter.more();
             ++iter) {
          bool found = false;
          for (size_t i = 0; i < ins->numDefs(); i++) {
            if (ins->getDef(i)->isFixed() &&
                ins->getDef(i)->output()->aliases(LAllocation(*iter))) {
              found = true;
              break;
            }
          }
          // If this register doesn't have an explicit def above, mark
          // it as clobbered by the call unless it is actually
          // call-preserved.
          if (!found && !ins->isCallPreserved(*iter)) {
            if (!addInitialFixedRange(*iter, outputOf(*ins),
                                      outputOf(*ins).next())) {
              return false;
            }
          }
        }

        CallRange* callRange = new (alloc().fallible())
            CallRange(outputOf(*ins), outputOf(*ins).next());
        if (!callRange) {
          return false;
        }

        callRangesList.pushFront(callRange);
        if (!callRanges.insert(callRange)) {
          return false;
        }
      }

      for (size_t i = 0; i < ins->numDefs(); i++) {
        LDefinition* def = ins->getDef(i);
        if (def->isBogusTemp()) {
          continue;
        }

        CodePosition from = outputOf(*ins);

        if (def->policy() == LDefinition::MUST_REUSE_INPUT) {
          // MUST_REUSE_INPUT is implemented by allocating an output
          // register and moving the input to it. Register hints are
          // used to avoid unnecessary moves. We give the input an
          // LUse::ANY policy to avoid allocating a register for the
          // input.
          LUse* inputUse = ins->getOperand(def->getReusedInput())->toUse();
          MOZ_ASSERT(inputUse->policy() == LUse::REGISTER);
          MOZ_ASSERT(inputUse->usedAtStart());
          *inputUse = LUse(inputUse->virtualRegister(), LUse::ANY,
                           /* usedAtStart = */ true);
        }

        if (!vreg(def).addInitialRange(alloc(), from, from.next(),
                                       &numRanges)) {
          return false;
        }
        vreg(def).setInitialDefinition(from);
        live.remove(def->virtualRegister());
      }

      for (size_t i = 0; i < ins->numTemps(); i++) {
        LDefinition* temp = ins->getTemp(i);
        if (temp->isBogusTemp()) {
          continue;
        }

        // Normally temps are considered to cover both the input
        // and output of the associated instruction. In some cases
        // though we want to use a fixed register as both an input
        // and clobbered register in the instruction, so watch for
        // this and shorten the temp to cover only the output.
        CodePosition from = inputOf(*ins);
        if (temp->policy() == LDefinition::FIXED) {
          AnyRegister reg = temp->output()->toRegister();
          for (LInstruction::InputIterator alloc(**ins); alloc.more();
               alloc.next()) {
            if (alloc->isUse()) {
              LUse* use = alloc->toUse();
              if (use->isFixedRegister()) {
                if (GetFixedRegister(vreg(use).def(), use) == reg) {
                  from = outputOf(*ins);
                }
              }
            }
          }
        }

        // * For non-call instructions, temps cover both the input and output,
        //   so temps never alias uses (even at-start uses) or defs.
        // * For call instructions, temps only cover the input (the output is
        //   used for the force-spill ranges added above). This means temps
        //   still don't alias uses but they can alias the (fixed) defs. For now
        //   we conservatively require temps to have a fixed register for call
        //   instructions to prevent a footgun.
        MOZ_ASSERT_IF(ins->isCall(), temp->policy() == LDefinition::FIXED);
        CodePosition to =
            ins->isCall() ? outputOf(*ins) : outputOf(*ins).next();

        if (!vreg(temp).addInitialRange(alloc(), from, to, &numRanges)) {
          return false;
        }
        vreg(temp).setInitialDefinition(from);
      }

      DebugOnly<bool> hasUseRegister = false;
      DebugOnly<bool> hasUseRegisterAtStart = false;

      for (LInstruction::InputIterator inputAlloc(**ins); inputAlloc.more();
           inputAlloc.next()) {
        if (inputAlloc->isUse()) {
          LUse* use = inputAlloc->toUse();

          // Call uses should always be at-start, since calls use all
          // registers.
          MOZ_ASSERT_IF(ins->isCall() && !inputAlloc.isSnapshotInput(),
                        use->usedAtStart());

#ifdef DEBUG
          // If there are both useRegisterAtStart(x) and useRegister(y)
          // uses, we may assign the same register to both operands
          // (bug 772830). Don't allow this for now.
          if (use->policy() == LUse::REGISTER) {
            if (use->usedAtStart()) {
              if (!IsInputReused(*ins, use)) {
                hasUseRegisterAtStart = true;
              }
            } else {
              hasUseRegister = true;
            }
          }
          MOZ_ASSERT(!(hasUseRegister && hasUseRegisterAtStart));
#endif

          // Don't treat RECOVERED_INPUT uses as keeping the vreg alive.
          if (use->policy() == LUse::RECOVERED_INPUT) {
            continue;
          }

          CodePosition to = use->usedAtStart() ? inputOf(*ins) : outputOf(*ins);
          if (use->isFixedRegister()) {
            LAllocation reg(AnyRegister::FromCode(use->registerCode()));
            for (size_t i = 0; i < ins->numDefs(); i++) {
              LDefinition* def = ins->getDef(i);
              if (def->policy() == LDefinition::FIXED &&
                  *def->output() == reg) {
                to = inputOf(*ins);
              }
            }
          }

          if (!vreg(use).addInitialRange(alloc(), entryOf(block), to.next(),
                                         &numRanges)) {
            return false;
          }
          UsePosition* usePosition =
              new (alloc().fallible()) UsePosition(use, to);
          if (!usePosition) {
            return false;
          }
          vreg(use).addInitialUse(usePosition);
          live.insert(use->virtualRegister());
        }
      }
    }

    // Phis have simultaneous assignment semantics at block begin, so at
    // the beginning of the block we can be sure that liveIn does not
    // contain any phi outputs.
    for (unsigned int i = 0; i < block->numPhis(); i++) {
      LDefinition* def = block->getPhi(i)->getDef(0);
      if (live.contains(def->virtualRegister())) {
        live.remove(def->virtualRegister());
      } else {
        // This is a dead phi, so add a dummy range over all phis. This
        // can go away if we have an earlier dead code elimination pass.
        CodePosition entryPos = entryOf(block);
        if (!vreg(def).addInitialRange(alloc(), entryPos, entryPos.next(),
                                       &numRanges)) {
          return false;
        }
      }
    }

    if (mblock->isLoopHeader()) {
      // A divergence from the published algorithm is required here, as
      // our block order does not guarantee that blocks of a loop are
      // contiguous. As a result, a single live range spanning the
      // loop is not possible. Additionally, we require liveIn in a later
      // pass for resolution, so that must also be fixed up here.
      MBasicBlock* loopBlock = mblock->backedge();
      while (true) {
        // Blocks must already have been visited to have a liveIn set.
        MOZ_ASSERT(loopBlock->id() >= mblock->id());

        // Add a range for this entire loop block
        CodePosition from = entryOf(loopBlock->lir());
        CodePosition to = exitOf(loopBlock->lir()).next();

        for (BitSet::Iterator liveRegId(live); liveRegId; ++liveRegId) {
          if (!vregs[*liveRegId].addInitialRange(alloc(), from, to,
                                                 &numRanges)) {
            return false;
          }
        }

        // Fix up the liveIn set.
        liveIn[loopBlock->id()].insertAll(live);

        // Make sure we don't visit this node again
        loopDone.insert(loopBlock->id());

        // If this is the loop header, any predecessors are either the
        // backedge or out of the loop, so skip any predecessors of
        // this block
        if (loopBlock != mblock) {
          for (size_t i = 0; i < loopBlock->numPredecessors(); i++) {
            MBasicBlock* pred = loopBlock->getPredecessor(i);
            if (loopDone.contains(pred->id())) {
              continue;
            }
            if (!loopWorkList.append(pred)) {
              return false;
            }
          }
        }

        // Terminate loop if out of work.
        if (loopWorkList.empty()) {
          break;
        }

        // Grab the next block off the work list, skipping any OSR block.
        MBasicBlock* osrBlock = graph.mir().osrBlock();
        while (!loopWorkList.empty()) {
          loopBlock = loopWorkList.popCopy();
          if (loopBlock != osrBlock) {
            break;
          }
        }

        // If end is reached without finding a non-OSR block, then no more work
        // items were found.
        if (loopBlock == osrBlock) {
          MOZ_ASSERT(loopWorkList.empty());
          break;
        }
      }

      // Clear the done set for other loops
      loopDone.clear();
    }

    MOZ_ASSERT_IF(!mblock->numPredecessors(), live.empty());
  }

  JitSpew(JitSpew_RegAlloc, "Liveness analysis complete");

  if (JitSpewEnabled(JitSpew_RegAlloc)) {
    dumpInstructions();
  }

  return true;
}

bool BacktrackingAllocator::go() {
  JitSpewCont(JitSpew_RegAlloc, "\n");
  JitSpew(JitSpew_RegAlloc, "Beginning register allocation");

  if (!init()) {
    return false;
  }

  if (!buildLivenessInfo()) {
    return false;
  }

  if (!allocationQueue.reserve(graph.numVirtualRegisters() * 3 / 2)) {
    return false;
  }

  JitSpew(JitSpew_RegAlloc, "Beginning grouping and queueing registers");
  if (!mergeAndQueueRegisters()) {
    return false;
  }

  if (JitSpewEnabled(JitSpew_RegAlloc)) {
    dumpVregs("after grouping/queueing regs");
  }

  JitSpewCont(JitSpew_RegAlloc, "\n");
  JitSpew(JitSpew_RegAlloc, "Beginning main allocation loop");

  // Allocate, spill and split bundles until finished.
  while (!allocationQueue.empty()) {
    if (mir->shouldCancel("Backtracking Allocation")) {
      return false;
    }

    QueueItem item = allocationQueue.removeHighest();
    if (!processBundle(mir, item.bundle)) {
      return false;
    }
  }

  JitSpew(JitSpew_RegAlloc, "Main allocation loop complete");

  if (!tryAllocatingRegistersForSpillBundles()) {
    return false;
  }

  if (!pickStackSlots()) {
    return false;
  }

  if (JitSpewEnabled(JitSpew_RegAlloc)) {
    JitSpewCont(JitSpew_RegAlloc, "\n");
    dumpAllocations();
  }

  if (!resolveControlFlow()) {
    return false;
  }

  if (!reifyAllocations()) {
    return false;
  }

  if (!populateSafepoints()) {
    return false;
  }

  if (!annotateMoveGroups()) {
    return false;
  }

  JitSpewCont(JitSpew_RegAlloc, "\n");
  JitSpew(JitSpew_RegAlloc, "Finished register allocation");

  return true;
}

static bool IsArgumentSlotDefinition(LDefinition* def) {
  return def->policy() == LDefinition::FIXED && def->output()->isArgument();
}

static bool IsThisSlotDefinition(LDefinition* def) {
  return IsArgumentSlotDefinition(def) &&
         def->output()->toArgument()->index() <
             THIS_FRAME_ARGSLOT + sizeof(Value);
}

static bool HasStackPolicy(LDefinition* def) {
  return def->policy() == LDefinition::STACK;
}

bool BacktrackingAllocator::tryMergeBundles(LiveBundle* bundle0,
                                            LiveBundle* bundle1) {
  // See if bundle0 and bundle1 can be merged together.
  if (bundle0 == bundle1) {
    return true;
  }

  // Get a representative virtual register from each bundle.
  VirtualRegister& reg0 = vregs[bundle0->firstRange()->vreg()];
  VirtualRegister& reg1 = vregs[bundle1->firstRange()->vreg()];

  MOZ_ASSERT(CanMergeTypesInBundle(reg0.type(), reg1.type()));
  MOZ_ASSERT(reg0.isCompatible(reg1));

  // Registers which might spill to the frame's |this| slot can only be
  // grouped with other such registers. The frame's |this| slot must always
  // hold the |this| value, as required by JitFrame tracing and by the Ion
  // constructor calling convention.
  if (IsThisSlotDefinition(reg0.def()) || IsThisSlotDefinition(reg1.def())) {
    if (*reg0.def()->output() != *reg1.def()->output()) {
      return true;
    }
  }

  // Registers which might spill to the frame's argument slots can only be
  // grouped with other such registers if the frame might access those
  // arguments through a lazy arguments object or rest parameter.
  if (IsArgumentSlotDefinition(reg0.def()) ||
      IsArgumentSlotDefinition(reg1.def())) {
    if (graph.mir().entryBlock()->info().mayReadFrameArgsDirectly()) {
      if (*reg0.def()->output() != *reg1.def()->output()) {
        return true;
      }
    }
  }

  // When we make a call to a WebAssembly function that returns multiple
  // results, some of those results can go on the stack.  The callee is passed a
  // pointer to this stack area, which is represented as having policy
  // LDefinition::STACK (with type LDefinition::STACKRESULTS).  Individual
  // results alias parts of the stack area with a value-appropriate type, but
  // policy LDefinition::STACK.  This aliasing between allocations makes it
  // unsound to merge anything with a LDefinition::STACK policy.
  if (HasStackPolicy(reg0.def()) || HasStackPolicy(reg1.def())) {
    return true;
  }

  // Limit the number of times we compare ranges if there are many ranges in
  // one of the bundles, to avoid quadratic behavior.
  static const size_t MAX_RANGES = 200;

  // Make sure that ranges in the bundles do not overlap.
  LiveRange::BundleLinkIterator iter0 = bundle0->rangesBegin(),
                                iter1 = bundle1->rangesBegin();
  size_t count = 0;
  while (iter0 && iter1) {
    if (++count >= MAX_RANGES) {
      return true;
    }

    LiveRange* range0 = LiveRange::get(*iter0);
    LiveRange* range1 = LiveRange::get(*iter1);

    if (range0->from() >= range1->to()) {
      iter1++;
    } else if (range1->from() >= range0->to()) {
      iter0++;
    } else {
      return true;
    }
  }

  // Move all ranges from bundle1 into bundle0.
  while (LiveRange* range = bundle1->popFirstRange()) {
    bundle0->addRange(range);
  }

  return true;
}

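// Return the def or temp of |node| with a MUST_REUSE_INPUT policy that reuses
// |alloc| as its input, or nullptr if there is none. Phis never reuse inputs.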
static inline LDefinition* FindReusingDefOrTemp(LNode* node,
                                                LAllocation* alloc) {
  if (node->isPhi()) {
    MOZ_ASSERT(node->toPhi()->numDefs() == 1);
    MOZ_ASSERT(node->toPhi()->getDef(0)->policy() !=
               LDefinition::MUST_REUSE_INPUT);
    return nullptr;
  }

  LInstruction* ins = node->toInstruction();

  for (size_t i = 0; i < ins->numDefs(); i++) {
    LDefinition* def = ins->getDef(i);
    if (def->policy() == LDefinition::MUST_REUSE_INPUT &&
        ins->getOperand(def->getReusedInput()) == alloc) {
      return def;
    }
  }
  for (size_t i = 0; i < ins->numTemps(); i++) {
    LDefinition* def = ins->getTemp(i);
    if (def->policy() == LDefinition::MUST_REUSE_INPUT &&
        ins->getOperand(def->getReusedInput()) == alloc) {
      return def;
    }
  }
  return nullptr;
}

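// Count the definitions of |ins| that have a MUST_REUSE_INPUT policy.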
static inline size_t NumReusingDefs(LInstruction* ins) {
  size_t num = 0;
  for (size_t i = 0; i < ins->numDefs(); i++) {
    LDefinition* def = ins->getDef(i);
    if (def->policy() == LDefinition::MUST_REUSE_INPUT) {
      num++;
    }
  }
  return num;
}

bool BacktrackingAllocator::tryMergeReusedRegister(VirtualRegister& def,
                                                   VirtualRegister& input) {
  // def is a vreg which reuses input for its output physical register. Try
  // to merge ranges for def with those of input if possible, as avoiding
  // copies before def's instruction is crucial for generated code quality
  // (MUST_REUSE_INPUT is used for all arithmetic on x86/x64).

  if (def.rangeFor(inputOf(def.ins()))) {
    MOZ_ASSERT(def.isTemp());
    def.setMustCopyInput();
    return true;
  }

  if (!CanMergeTypesInBundle(def.type(), input.type())) {
    def.setMustCopyInput();
    return true;
  }

  LiveRange* inputRange = input.rangeFor(outputOf(def.ins()));
  if (!inputRange) {
    // The input is not live after the instruction, either in a safepoint
    // for the instruction or in subsequent code. The input and output
    // can thus be in the same group.
    return tryMergeBundles(def.firstBundle(), input.firstBundle());
  }

  // The input is live afterwards, either in future instructions or in a
  // safepoint for the reusing instruction. This is impossible to satisfy
  // without copying the input.
  //
  // It may or may not be better to split the input into two bundles at the
  // point of the definition, which may permit merging. One case where it is
  // definitely better to split is if the input never has any register uses
  // after the instruction. Handle this splitting eagerly.

  LBlock* block = def.ins()->block();

  // The input's lifetime must end within the same block as the definition,
  // otherwise it could live on in phis elsewhere.
  if (inputRange != input.lastRange() || inputRange->to() > exitOf(block)) {
    def.setMustCopyInput();
    return true;
  }

  // If we already split the input for some other register, don't make a
  // third bundle.
  if (inputRange->bundle() != input.firstRange()->bundle()) {
    def.setMustCopyInput();
    return true;
  }

  // If the input will start out in memory then adding a separate bundle for
  // memory uses after the def won't help.
  if (input.def()->isFixed() && !input.def()->output()->isRegister()) {
    def.setMustCopyInput();
    return true;
  }

  // The input cannot have register or reused uses after the definition.
  for (UsePositionIterator iter = inputRange->usesBegin(); iter; iter++) {
    if (iter->pos <= inputOf(def.ins())) {
      continue;
    }

    LUse* use = iter->use();
    if (FindReusingDefOrTemp(insData[iter->pos], use)) {
      def.setMustCopyInput();
      return true;
    }
    if (iter->usePolicy() != LUse::ANY &&
        iter->usePolicy() != LUse::KEEPALIVE) {
      def.setMustCopyInput();
      return true;
    }
  }

  LiveRange* preRange = LiveRange::FallibleNew(
      alloc(), input.vreg(), inputRange->from(), outputOf(def.ins()));
  if (!preRange) {
    return false;
  }

  // The new range starts at reg's input position, which means it overlaps
  // with the old range at one position. This is what we want, because we
  // need to copy the input before the instruction.
  LiveRange* postRange = LiveRange::FallibleNew(
      alloc(), input.vreg(), inputOf(def.ins()), inputRange->to());
  if (!postRange) {
    return false;
  }

  inputRange->distributeUses(preRange);
  inputRange->distributeUses(postRange);
  MOZ_ASSERT(!inputRange->hasUses());

  JitSpewIfEnabled(JitSpew_RegAlloc,
                   "  splitting reused input at %u to try to help grouping",
                   inputOf(def.ins()).bits());

  LiveBundle* firstBundle = inputRange->bundle();
  input.removeRange(inputRange);
  input.addRange(preRange);
  input.addRange(postRange);

  firstBundle->removeRange(inputRange);
  firstBundle->addRange(preRange);

  // The new range goes in a separate bundle, where it will be spilled during
  // allocation.
  LiveBundle* secondBundle = LiveBundle::FallibleNew(alloc(), nullptr, nullptr);
  if (!secondBundle) {
    return false;
  }
  secondBundle->addRange(postRange);

  return tryMergeBundles(def.firstBundle(), input.firstBundle());
}

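// Assign a stack allocation to a STACK-policy definition: a fresh stack area
// for a STACKRESULTS definition, or the corresponding slot within the
// already-allocated area for an individual stack result.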
void BacktrackingAllocator::allocateStackDefinition(VirtualRegister& reg) {
  LInstruction* ins = reg.ins()->toInstruction();
  if (reg.def()->type() == LDefinition::STACKRESULTS) {
    LStackArea alloc(ins->toInstruction());
    stackSlotAllocator.allocateStackArea(&alloc);
    reg.def()->setOutput(alloc);
  } else {
    // Because the definitions are visited in order, the area has been allocated
    // before we reach this result, so we know the operand is an LStackArea.
    const LUse* use = ins->getOperand(0)->toUse();
    VirtualRegister& area = vregs[use->virtualRegister()];
    const LStackArea* areaAlloc = area.def()->output()->toStackArea();
    reg.def()->setOutput(areaAlloc->resultAlloc(ins, reg.def()));
  }
}

bool BacktrackingAllocator::mergeAndQueueRegisters() {
  MOZ_ASSERT(!vregs[0u].hasRanges());

  // Create a bundle for each register containing all its ranges.
  for (size_t i = 1; i < graph.numVirtualRegisters(); i++) {
    VirtualRegister& reg = vregs[i];
    if (!reg.hasRanges()) {
      continue;
    }

    LiveBundle* bundle = LiveBundle::FallibleNew(alloc(), nullptr, nullptr);
    if (!bundle) {
      return false;
    }
    for (LiveRange::RegisterLinkIterator iter = reg.rangesBegin(); iter;
         iter++) {
      LiveRange* range = LiveRange::get(*iter);
      bundle->addRange(range);
    }
  }

  // If there is an OSR block, merge parameters in that block with the
  // corresponding parameters in the initial block.
  if (MBasicBlock* osr = graph.mir().osrBlock()) {
    size_t original = 1;
    for (LInstructionIterator iter = osr->lir()->begin();
         iter != osr->lir()->end(); iter++) {
      if (iter->isParameter()) {
        for (size_t i = 0; i < iter->numDefs(); i++) {
          DebugOnly<bool> found = false;
          VirtualRegister& paramVreg = vreg(iter->getDef(i));
          for (; original < paramVreg.vreg(); original++) {
            VirtualRegister& originalVreg = vregs[original];
            if (*originalVreg.def()->output() == *iter->getDef(i)->output()) {
              MOZ_ASSERT(originalVreg.ins()->isParameter());
              if (!tryMergeBundles(originalVreg.firstBundle(),
                                   paramVreg.firstBundle())) {
                return false;
              }
              found = true;
              break;
            }
          }
          MOZ_ASSERT(found);
        }
      }
    }
  }

  // Try to merge registers with their reused inputs.
  for (size_t i = 1; i < graph.numVirtualRegisters(); i++) {
    VirtualRegister& reg = vregs[i];
    if (!reg.hasRanges()) {
      continue;
    }

    if (reg.def()->policy() == LDefinition::MUST_REUSE_INPUT) {
      LUse* use = reg.ins()
                      ->toInstruction()
                      ->getOperand(reg.def()->getReusedInput())
                      ->toUse();
      if (!tryMergeReusedRegister(reg, vreg(use))) {
        return false;
      }
    }
  }

  // Try to merge phis with their inputs.
  for (size_t i = 0; i < graph.numBlocks(); i++) {
    LBlock* block = graph.getBlock(i);
    for (size_t j = 0; j < block->numPhis(); j++) {
      LPhi* phi = block->getPhi(j);
      VirtualRegister& outputVreg = vreg(phi->getDef(0));
      for (size_t k = 0, kend = phi->numOperands(); k < kend; k++) {
        VirtualRegister& inputVreg = vreg(phi->getOperand(k)->toUse());
        if (!tryMergeBundles(inputVreg.firstBundle(),
                             outputVreg.firstBundle())) {
          return false;
        }
      }
    }
  }

  // Add all bundles to the allocation queue, and create spill sets for them.
  for (size_t i = 1; i < graph.numVirtualRegisters(); i++) {
    VirtualRegister& reg = vregs[i];

    // Eagerly allocate stack result areas and their component stack results.
    if (reg.def() && reg.def()->policy() == LDefinition::STACK) {
      allocateStackDefinition(reg);
    }

    for (LiveRange::RegisterLinkIterator iter = reg.rangesBegin(); iter;
         iter++) {
      LiveRange* range = LiveRange::get(*iter);
      LiveBundle* bundle = range->bundle();
      if (range == bundle->firstRange()) {
        if (!alloc().ensureBallast()) {
          return false;
        }

        SpillSet* spill = SpillSet::New(alloc());
        if (!spill) {
          return false;
        }
        bundle->setSpillSet(spill);

        size_t priority = computePriority(bundle);
        if (!allocationQueue.insert(QueueItem(bundle, priority))) {
          return false;
        }
      }
    }
  }

  return true;
}

static const size_t MAX_ATTEMPTS = 2;

bool BacktrackingAllocator::tryAllocateFixed(LiveBundle* bundle,
                                             Requirement requirement,
                                             bool* success, bool* pfixed,
                                             LiveBundleVector& conflicting) {
  // Spill bundles which are required to be in a certain stack slot.
  if (!requirement.allocation().isRegister()) {
    JitSpew(JitSpew_RegAlloc, "  stack allocation requirement");
    bundle->setAllocation(requirement.allocation());
    *success = true;
    return true;
  }

  AnyRegister reg = requirement.allocation().toRegister();
  return tryAllocateRegister(registers[reg.code()], bundle, success, pfixed,
                             conflicting);
}

bool BacktrackingAllocator::tryAllocateNonFixed(LiveBundle* bundle,
                                                Requirement requirement,
                                                Requirement hint, bool* success,
                                                bool* pfixed,
                                                LiveBundleVector& conflicting) {
  // If we want, but do not require a bundle to be in a specific register,
  // only look at that register for allocating and evict or spill if it is
  // not available. Picking a separate register may be even worse than
  // spilling, as it will still necessitate moves and will tie up more
  // registers than if we spilled.
  if (hint.kind() == Requirement::FIXED) {
    AnyRegister reg = hint.allocation().toRegister();
    if (!tryAllocateRegister(registers[reg.code()], bundle, success, pfixed,
                             conflicting)) {
      return false;
    }
    if (*success) {
      return true;
    }
  }

  // Spill bundles which have no hint or register requirement.
  if (requirement.kind() == Requirement::NONE &&
      hint.kind() != Requirement::REGISTER) {
    JitSpew(JitSpew_RegAlloc,
            "  postponed spill (no hint or register requirement)");
    if (!spilledBundles.append(bundle)) {
      return false;
    }
    *success = true;
    return true;
  }

  if (conflicting.empty() || minimalBundle(bundle)) {
    // Search for any available register which the bundle can be
    // allocated to.
    for (size_t i = 0; i < AnyRegister::Total; i++) {
      if (!tryAllocateRegister(registers[i], bundle, success, pfixed,
                               conflicting)) {
        return false;
      }
      if (*success) {
        return true;
      }
    }
  }

  // Spill bundles which have no register requirement if they didn't get
  // allocated.
  if (requirement.kind() == Requirement::NONE) {
    JitSpew(JitSpew_RegAlloc, "  postponed spill (no register requirement)");
    if (!spilledBundles.append(bundle)) {
      return false;
    }
    *success = true;
    return true;
  }

  // We failed to allocate this bundle.
  MOZ_ASSERT(!*success);
  return true;
}

bool BacktrackingAllocator::processBundle(MIRGenerator* mir,
                                          LiveBundle* bundle) {
  JitSpewIfEnabled(JitSpew_RegAlloc,
                   "Allocating %s [priority %zu] [weight %zu]",
                   bundle->toString().get(), computePriority(bundle),
                   computeSpillWeight(bundle));

  // A bundle can be processed by doing any of the following:
  //
  // - Assigning the bundle a register. The bundle cannot overlap any other
  //   bundle allocated for that physical register.
  //
  // - Spilling the bundle, provided it has no register uses.
  //
  // - Splitting the bundle into two or more bundles which cover the original
  //   one. The new bundles are placed back onto the priority queue for later
  //   processing.
  //
  // - Evicting one or more existing allocated bundles, and then doing one
  //   of the above operations. Evicted bundles are placed back on the
  //   priority queue. Any evicted bundles must have a lower spill weight
  //   than the bundle being processed.
  //
  // As long as this structure is followed, termination is guaranteed.
  // In general, we want to minimize the amount of bundle splitting (which
  // generally necessitates spills), so allocate longer lived, lower weight
  // bundles first and evict and split them later if they prevent allocation
  // for higher weight bundles.

  Requirement requirement, hint;
  bool canAllocate = computeRequirement(bundle, &requirement, &hint);

  bool fixed;
  LiveBundleVector conflicting;
  for (size_t attempt = 0;; attempt++) {
    if (mir->shouldCancel("Backtracking Allocation (processBundle loop)")) {
      return false;
    }

    if (canAllocate) {
      bool success = false;
      fixed = false;
      conflicting.clear();

      // Ok, let's try allocating for this bundle.
      if (requirement.kind() == Requirement::FIXED) {
        if (!tryAllocateFixed(bundle, requirement, &success, &fixed,
                              conflicting)) {
          return false;
        }
      } else {
        if (!tryAllocateNonFixed(bundle, requirement, hint, &success, &fixed,
                                 conflicting)) {
          return false;
        }
      }

      // If that worked, we're done!
      if (success) {
        return true;
      }

      // If that didn't work, but we have one or more non-fixed bundles
      // known to be conflicting, maybe we can evict them and try again.
      if ((attempt < MAX_ATTEMPTS || minimalBundle(bundle)) && !fixed &&
          !conflicting.empty() &&
          maximumSpillWeight(conflicting) < computeSpillWeight(bundle)) {
        for (size_t i = 0; i < conflicting.length(); i++) {
          if (!evictBundle(conflicting[i])) {
            return false;
          }
        }
        continue;
      }
    }

    // A minimal bundle cannot be split any further. If we try to split it
    // at this point we will just end up with the same bundle and will
    // enter an infinite loop. Weights and the initial live ranges must
    // be constructed so that any minimal bundle is allocatable.
    MOZ_ASSERT(!minimalBundle(bundle));

    LiveBundle* conflict = conflicting.empty() ? nullptr : conflicting[0];
    return chooseBundleSplit(bundle, canAllocate && fixed, conflict);
  }
}

1499 bool BacktrackingAllocator::computeRequirement(LiveBundle* bundle,
1500                                                Requirement* requirement,
1501                                                Requirement* hint) {
1502   // Set any requirement or hint on bundle according to its definition and
1503   // uses. Return false if there are conflicting requirements which will
1504   // require the bundle to be split.
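  //
  // For example (illustrative registers), a bundle with one use fixed to rax
  // and another fixed to rcx cannot satisfy both with a single allocation;
  // the second Requirement::merge() below fails, we return false, and the
  // caller splits the bundle.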
1505 
1506   for (LiveRange::BundleLinkIterator iter = bundle->rangesBegin(); iter;
1507        iter++) {
1508     LiveRange* range = LiveRange::get(*iter);
1509     VirtualRegister& reg = vregs[range->vreg()];
1510 
1511     if (range->hasDefinition()) {
1512       // Deal with any definition constraints/hints.
1513       LDefinition::Policy policy = reg.def()->policy();
1514       if (policy == LDefinition::FIXED || policy == LDefinition::STACK) {
1515         // Fixed and stack policies get a FIXED requirement.  (In the stack
1516         // case, the allocation should have been performed already by
1517         // mergeAndQueueRegisters.)
1518         JitSpewIfEnabled(JitSpew_RegAlloc,
1519                          "  Requirement %s, fixed by definition",
1520                          reg.def()->output()->toString().get());
1521         if (!requirement->merge(Requirement(*reg.def()->output()))) {
1522           return false;
1523         }
1524       } else if (reg.ins()->isPhi()) {
1525         // Phis don't have any requirements, but they should prefer their
1526         // input allocations. This is captured by the group hints above.
1527       } else {
1528         // Non-phis get a REGISTER requirement.
1529         if (!requirement->merge(Requirement(Requirement::REGISTER))) {
1530           return false;
1531         }
1532       }
1533     }
1534 
1535     // Search uses for requirements.
1536     for (UsePositionIterator iter = range->usesBegin(); iter; iter++) {
1537       LUse::Policy policy = iter->usePolicy();
1538       if (policy == LUse::FIXED) {
1539         AnyRegister required = GetFixedRegister(reg.def(), iter->use());
1540 
1541         JitSpewIfEnabled(JitSpew_RegAlloc, "  Requirement %s, due to use at %u",
1542                          required.name(), iter->pos.bits());
1543 
1544         // If there are multiple fixed registers which the bundle is
1545         // required to use, fail. The bundle will need to be split before
1546         // it can be allocated.
1547         if (!requirement->merge(Requirement(LAllocation(required)))) {
1548           return false;
1549         }
1550       } else if (policy == LUse::REGISTER) {
1551         if (!requirement->merge(Requirement(Requirement::REGISTER))) {
1552           return false;
1553         }
1554       } else if (policy == LUse::ANY) {
1555         // ANY differs from KEEPALIVE by actively preferring a register.
1556         if (!hint->merge(Requirement(Requirement::REGISTER))) {
1557           return false;
1558         }
1559       }
1560 
1561       // The only case of STACK use policies is individual stack results using
1562       // their containing stack result area, which is given a fixed allocation
1563       // above.
1564       MOZ_ASSERT_IF(policy == LUse::STACK,
1565                     requirement->kind() == Requirement::FIXED);
1566       MOZ_ASSERT_IF(policy == LUse::STACK,
1567                     requirement->allocation().isStackArea());
1568     }
1569   }
1570 
1571   return true;
1572 }
1573 
1574 bool BacktrackingAllocator::tryAllocateRegister(PhysicalRegister& r,
1575                                                 LiveBundle* bundle,
1576                                                 bool* success, bool* pfixed,
1577                                                 LiveBundleVector& conflicting) {
1578   *success = false;
1579 
1580   if (!r.allocatable) {
1581     return true;
1582   }
1583 
1584   LiveBundleVector aliasedConflicting;
1585 
1586   for (LiveRange::BundleLinkIterator iter = bundle->rangesBegin(); iter;
1587        iter++) {
1588     LiveRange* range = LiveRange::get(*iter);
1589     VirtualRegister& reg = vregs[range->vreg()];
1590 
1591     if (!reg.isCompatible(r.reg)) {
1592       return true;
1593     }
1594 
1595     for (size_t a = 0; a < r.reg.numAliased(); a++) {
1596       PhysicalRegister& rAlias = registers[r.reg.aliased(a).code()];
1597       LiveRange* existing;
1598       if (!rAlias.allocations.contains(range, &existing)) {
1599         continue;
1600       }
1601       if (existing->hasVreg()) {
1602         MOZ_ASSERT(existing->bundle()->allocation().toRegister() == rAlias.reg);
1603         bool duplicate = false;
1604         for (size_t i = 0; i < aliasedConflicting.length(); i++) {
1605           if (aliasedConflicting[i] == existing->bundle()) {
1606             duplicate = true;
1607             break;
1608           }
1609         }
1610         if (!duplicate && !aliasedConflicting.append(existing->bundle())) {
1611           return false;
1612         }
1613       } else {
1614         JitSpewIfEnabled(JitSpew_RegAlloc, "  %s collides with fixed use %s",
1615                          rAlias.reg.name(), existing->toString().get());
1616         *pfixed = true;
1617         return true;
1618       }
1619     }
1620   }
1621 
1622   if (!aliasedConflicting.empty()) {
1623     // One or more aliased registers are allocated to another bundle
1624     // overlapping this one. Keep track of the conflicting set, and in the
1625     // case of multiple conflicting sets keep track of the set with the
1626     // lowest maximum spill weight.
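    //
    // (Aliasing is relevant on targets where a wider register overlaps
    // narrower ones, e.g. a double register overlapping two single-precision
    // registers; a conflict on any alias blocks use of this register.)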
1627 
1628     // The #ifdef guards against "unused variable 'existing'" bustage.
1629 #ifdef JS_JITSPEW
1630     if (JitSpewEnabled(JitSpew_RegAlloc)) {
1631       if (aliasedConflicting.length() == 1) {
1632         LiveBundle* existing = aliasedConflicting[0];
1633         JitSpew(JitSpew_RegAlloc, "  %s collides with %s [weight %zu]",
1634                 r.reg.name(), existing->toString().get(),
1635                 computeSpillWeight(existing));
1636       } else {
1637         JitSpew(JitSpew_RegAlloc, "  %s collides with the following",
1638                 r.reg.name());
1639         for (size_t i = 0; i < aliasedConflicting.length(); i++) {
1640           LiveBundle* existing = aliasedConflicting[i];
1641           JitSpew(JitSpew_RegAlloc, "      %s [weight %zu]",
1642                   existing->toString().get(), computeSpillWeight(existing));
1643         }
1644       }
1645     }
1646 #endif
1647 
1648     if (conflicting.empty()) {
1649       conflicting = std::move(aliasedConflicting);
1650     } else {
1651       if (maximumSpillWeight(aliasedConflicting) <
1652           maximumSpillWeight(conflicting)) {
1653         conflicting = std::move(aliasedConflicting);
1654       }
1655     }
1656     return true;
1657   }
1658 
1659   JitSpewIfEnabled(JitSpew_RegAlloc, "  allocated to %s", r.reg.name());
1660 
1661   for (LiveRange::BundleLinkIterator iter = bundle->rangesBegin(); iter;
1662        iter++) {
1663     LiveRange* range = LiveRange::get(*iter);
1664     if (!alloc().ensureBallast()) {
1665       return false;
1666     }
1667     if (!r.allocations.insert(range)) {
1668       return false;
1669     }
1670   }
1671 
1672   bundle->setAllocation(LAllocation(r.reg));
1673   *success = true;
1674   return true;
1675 }
1676 
1677 bool BacktrackingAllocator::evictBundle(LiveBundle* bundle) {
1678   JitSpewIfEnabled(JitSpew_RegAlloc,
1679                    "  Evicting %s [priority %zu] [weight %zu]",
1680                    bundle->toString().get(), computePriority(bundle),
1681                    computeSpillWeight(bundle));
1682 
1683   AnyRegister reg(bundle->allocation().toRegister());
1684   PhysicalRegister& physical = registers[reg.code()];
1685   MOZ_ASSERT(physical.reg == reg && physical.allocatable);
1686 
1687   for (LiveRange::BundleLinkIterator iter = bundle->rangesBegin(); iter;
1688        iter++) {
1689     LiveRange* range = LiveRange::get(*iter);
1690     physical.allocations.remove(range);
1691   }
1692 
1693   bundle->setAllocation(LAllocation());
1694 
1695   size_t priority = computePriority(bundle);
1696   return allocationQueue.insert(QueueItem(bundle, priority));
1697 }
1698 
1699 bool BacktrackingAllocator::splitAndRequeueBundles(
1700     LiveBundle* bundle, const LiveBundleVector& newBundles) {
1701 #ifdef DEBUG
1702   if (newBundles.length() == 1) {
1703     LiveBundle* newBundle = newBundles[0];
1704     if (newBundle->numRanges() == bundle->numRanges() &&
1705         computePriority(newBundle) == computePriority(bundle)) {
1706       bool different = false;
1707       LiveRange::BundleLinkIterator oldRanges = bundle->rangesBegin();
1708       LiveRange::BundleLinkIterator newRanges = newBundle->rangesBegin();
1709       while (oldRanges) {
1710         LiveRange* oldRange = LiveRange::get(*oldRanges);
1711         LiveRange* newRange = LiveRange::get(*newRanges);
1712         if (oldRange->from() != newRange->from() ||
1713             oldRange->to() != newRange->to()) {
1714           different = true;
1715           break;
1716         }
1717         oldRanges++;
1718         newRanges++;
1719       }
1720 
1721       // This is likely to trigger an infinite loop in register allocation. This
1722       // can be the result of invalid register constraints, making regalloc
1723       // impossible; consider relaxing those.
1724       MOZ_ASSERT(different,
1725                  "Split results in the same bundle with the same priority");
1726     }
1727   }
1728 #endif
1729 
1730   if (JitSpewEnabled(JitSpew_RegAlloc)) {
1731     JitSpew(JitSpew_RegAlloc,
1732             "    splitting bundle %s into:", bundle->toString().get());
1733     for (size_t i = 0; i < newBundles.length(); i++) {
1734       JitSpew(JitSpew_RegAlloc, "      %s", newBundles[i]->toString().get());
1735     }
1736   }
1737 
1738   // Remove all ranges in the old bundle from their register's list.
1739   for (LiveRange::BundleLinkIterator iter = bundle->rangesBegin(); iter;
1740        iter++) {
1741     LiveRange* range = LiveRange::get(*iter);
1742     vregs[range->vreg()].removeRange(range);
1743   }
1744 
1745   // Add all ranges in the new bundles to their register's list.
1746   for (size_t i = 0; i < newBundles.length(); i++) {
1747     LiveBundle* newBundle = newBundles[i];
1748     for (LiveRange::BundleLinkIterator iter = newBundle->rangesBegin(); iter;
1749          iter++) {
1750       LiveRange* range = LiveRange::get(*iter);
1751       vregs[range->vreg()].addRange(range);
1752     }
1753   }
1754 
1755   // Queue the new bundles for register assignment.
1756   for (size_t i = 0; i < newBundles.length(); i++) {
1757     LiveBundle* newBundle = newBundles[i];
1758     size_t priority = computePriority(newBundle);
1759     if (!allocationQueue.insert(QueueItem(newBundle, priority))) {
1760       return false;
1761     }
1762   }
1763 
1764   return true;
1765 }
1766 
1767 bool BacktrackingAllocator::spill(LiveBundle* bundle) {
1768   JitSpew(JitSpew_RegAlloc, "  Spilling bundle");
1769   MOZ_ASSERT(bundle->allocation().isBogus());
1770 
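  // If this bundle was split off a parent that already has spill backing,
  // fold its uses back into the parent's ranges instead of creating a second
  // spill location. Otherwise, register the bundle with its spill set so that
  // pickStackSlot() can assign a slot for the whole set later.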
1771   if (LiveBundle* spillParent = bundle->spillParent()) {
1772     JitSpew(JitSpew_RegAlloc, "    Using existing spill bundle");
1773     for (LiveRange::BundleLinkIterator iter = bundle->rangesBegin(); iter;
1774          iter++) {
1775       LiveRange* range = LiveRange::get(*iter);
1776       LiveRange* parentRange = spillParent->rangeFor(range->from());
1777       MOZ_ASSERT(parentRange->contains(range));
1778       MOZ_ASSERT(range->vreg() == parentRange->vreg());
1779       range->distributeUses(parentRange);
1780       MOZ_ASSERT(!range->hasUses());
1781       vregs[range->vreg()].removeRange(range);
1782     }
1783     return true;
1784   }
1785 
1786   return bundle->spillSet()->addSpilledBundle(bundle);
1787 }
1788 
1789 bool BacktrackingAllocator::tryAllocatingRegistersForSpillBundles() {
1790   for (auto it = spilledBundles.begin(); it != spilledBundles.end(); it++) {
1791     LiveBundle* bundle = *it;
1792     LiveBundleVector conflicting;
1793     bool fixed = false;
1794     bool success = false;
1795 
1796     if (mir->shouldCancel("Backtracking Try Allocating Spilled Bundles")) {
1797       return false;
1798     }
1799 
1800     JitSpewIfEnabled(JitSpew_RegAlloc, "Spill or allocate %s",
1801                      bundle->toString().get());
1802 
1803     // Search for any available register which the bundle can be
1804     // allocated to.
1805     for (size_t i = 0; i < AnyRegister::Total; i++) {
1806       if (!tryAllocateRegister(registers[i], bundle, &success, &fixed,
1807                                conflicting)) {
1808         return false;
1809       }
1810       if (success) {
1811         break;
1812       }
1813     }
1814 
1815     // If the bundle still has no register, spill the bundle.
1816     if (!success && !spill(bundle)) {
1817       return false;
1818     }
1819   }
1820 
1821   return true;
1822 }
1823 
1824 bool BacktrackingAllocator::pickStackSlots() {
1825   for (size_t i = 1; i < graph.numVirtualRegisters(); i++) {
1826     VirtualRegister& reg = vregs[i];
1827 
1828     if (mir->shouldCancel("Backtracking Pick Stack Slots")) {
1829       return false;
1830     }
1831 
1832     for (LiveRange::RegisterLinkIterator iter = reg.rangesBegin(); iter;
1833          iter++) {
1834       LiveRange* range = LiveRange::get(*iter);
1835       LiveBundle* bundle = range->bundle();
1836 
1837       if (bundle->allocation().isBogus()) {
1838         if (!pickStackSlot(bundle->spillSet())) {
1839           return false;
1840         }
1841         MOZ_ASSERT(!bundle->allocation().isBogus());
1842       }
1843     }
1844   }
1845 
1846   return true;
1847 }
1848 
1849 bool BacktrackingAllocator::pickStackSlot(SpillSet* spillSet) {
1850   // Look through all ranges that have been spilled in this set for a
1851   // register definition which is fixed to a stack or argument slot. If we
1852   // find one, use it for all bundles that have been spilled. tryMergeBundles
1853   // makes sure this reuse is possible when an initial bundle contains ranges
1854   // from multiple virtual registers.
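  // (A definition that is fixed to a stack or argument slot already has
  // stable memory backing, so reusing that location avoids allocating a
  // redundant spill slot for the same value.)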
1855   for (size_t i = 0; i < spillSet->numSpilledBundles(); i++) {
1856     LiveBundle* bundle = spillSet->spilledBundle(i);
1857     for (LiveRange::BundleLinkIterator iter = bundle->rangesBegin(); iter;
1858          iter++) {
1859       LiveRange* range = LiveRange::get(*iter);
1860       if (range->hasDefinition()) {
1861         LDefinition* def = vregs[range->vreg()].def();
1862         if (def->policy() == LDefinition::FIXED) {
1863           MOZ_ASSERT(!def->output()->isRegister());
1864           MOZ_ASSERT(!def->output()->isStackSlot());
1865           spillSet->setAllocation(*def->output());
1866           return true;
1867         }
1868       }
1869     }
1870   }
1871 
1872   LDefinition::Type type =
1873       vregs[spillSet->spilledBundle(0)->firstRange()->vreg()].type();
1874 
1875   SpillSlotList* slotList;
1876   switch (StackSlotAllocator::width(type)) {
1877     case 4:
1878       slotList = &normalSlots;
1879       break;
1880     case 8:
1881       slotList = &doubleSlots;
1882       break;
1883     case 16:
1884       slotList = &quadSlots;
1885       break;
1886     default:
1887       MOZ_CRASH("Bad width");
1888   }
1889 
1890   // Maximum number of existing spill slots we will look at before giving up
1891   // and allocating a new slot.
1892   static const size_t MAX_SEARCH_COUNT = 10;
1893 
1894   size_t searches = 0;
1895   SpillSlot* stop = nullptr;
1896   while (!slotList->empty()) {
1897     SpillSlot* spillSlot = *slotList->begin();
1898     if (!stop) {
1899       stop = spillSlot;
1900     } else if (stop == spillSlot) {
1901       // We looked through every slot in the list.
1902       break;
1903     }
1904 
1905     bool success = true;
1906     for (size_t i = 0; i < spillSet->numSpilledBundles(); i++) {
1907       LiveBundle* bundle = spillSet->spilledBundle(i);
1908       for (LiveRange::BundleLinkIterator iter = bundle->rangesBegin(); iter;
1909            iter++) {
1910         LiveRange* range = LiveRange::get(*iter);
1911         LiveRange* existing;
1912         if (spillSlot->allocated.contains(range, &existing)) {
1913           success = false;
1914           break;
1915         }
1916       }
1917       if (!success) {
1918         break;
1919       }
1920     }
1921     if (success) {
1922       // We can reuse this physical stack slot for the new bundles.
1923       // Update the allocated ranges for the slot.
1924       for (size_t i = 0; i < spillSet->numSpilledBundles(); i++) {
1925         LiveBundle* bundle = spillSet->spilledBundle(i);
1926         if (!insertAllRanges(spillSlot->allocated, bundle)) {
1927           return false;
1928         }
1929       }
1930       spillSet->setAllocation(spillSlot->alloc);
1931       return true;
1932     }
1933 
1934     // On a miss, move the spill slot to the end of the list. This way we
1935     // make fewer attempts to allocate from slots whose allocated ranges are
1936     // large and highly contended.
1937     slotList->popFront();
1938     slotList->pushBack(spillSlot);
1939 
1940     if (++searches == MAX_SEARCH_COUNT) {
1941       break;
1942     }
1943   }
1944 
1945   // We need a new physical stack slot.
1946   uint32_t stackSlot = stackSlotAllocator.allocateSlot(type);
1947 
1948   SpillSlot* spillSlot =
1949       new (alloc().fallible()) SpillSlot(stackSlot, alloc().lifoAlloc());
1950   if (!spillSlot) {
1951     return false;
1952   }
1953 
1954   for (size_t i = 0; i < spillSet->numSpilledBundles(); i++) {
1955     LiveBundle* bundle = spillSet->spilledBundle(i);
1956     if (!insertAllRanges(spillSlot->allocated, bundle)) {
1957       return false;
1958     }
1959   }
1960 
1961   spillSet->setAllocation(spillSlot->alloc);
1962 
1963   slotList->pushFront(spillSlot);
1964   return true;
1965 }
1966 
1967 bool BacktrackingAllocator::insertAllRanges(LiveRangeSet& set,
1968                                             LiveBundle* bundle) {
1969   for (LiveRange::BundleLinkIterator iter = bundle->rangesBegin(); iter;
1970        iter++) {
1971     LiveRange* range = LiveRange::get(*iter);
1972     if (!alloc().ensureBallast()) {
1973       return false;
1974     }
1975     if (!set.insert(range)) {
1976       return false;
1977     }
1978   }
1979   return true;
1980 }
1981 
1982 bool BacktrackingAllocator::deadRange(LiveRange* range) {
1983   // Check for direct uses of this range.
1984   if (range->hasUses() || range->hasDefinition()) {
1985     return false;
1986   }
1987 
1988   CodePosition start = range->from();
1989   LNode* ins = insData[start];
1990   if (start == entryOf(ins->block())) {
1991     return false;
1992   }
1993 
1994   VirtualRegister& reg = vregs[range->vreg()];
1995 
1996   // Check if there are later ranges for this vreg.
1997   LiveRange::RegisterLinkIterator iter = reg.rangesBegin(range);
1998   for (iter++; iter; iter++) {
1999     LiveRange* laterRange = LiveRange::get(*iter);
2000     if (laterRange->from() > range->from()) {
2001       return false;
2002     }
2003   }
2004 
2005   // Check if this range ends at a loop backedge.
2006   LNode* last = insData[range->to().previous()];
2007   if (last->isGoto() &&
2008       last->toGoto()->target()->id() < last->block()->mir()->id()) {
2009     return false;
2010   }
2011 
2012   // Check if there are phis which this vreg flows to.
2013   if (reg.usedByPhi()) {
2014     return false;
2015   }
2016 
2017   return true;
2018 }
2019 
2020 bool BacktrackingAllocator::moveAtEdge(LBlock* predecessor, LBlock* successor,
2021                                        LiveRange* from, LiveRange* to,
2022                                        LDefinition::Type type) {
2023   if (successor->mir()->numPredecessors() > 1) {
2024     MOZ_ASSERT(predecessor->mir()->numSuccessors() == 1);
2025     return moveAtExit(predecessor, from, to, type);
2026   }
2027 
2028   return moveAtEntry(successor, from, to, type);
2029 }
2030 
2031 bool BacktrackingAllocator::resolveControlFlow() {
2032   // Add moves to handle changing assignments for vregs over their lifetime.
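  //
  // This proceeds in three passes: (1) insert moves where a vreg's allocation
  // changes in the middle of a block, (2) lower phis into moves on their
  // incoming edges, and (3) insert moves on edges where the allocation at the
  // predecessor's exit differs from the one at the successor's entry.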
2033   JitSpew(JitSpew_RegAlloc, "Resolving control flow (vreg loop)");
2034 
2035   // Look for places where a register's assignment changes in the middle of a
2036   // basic block.
2037   MOZ_ASSERT(!vregs[0u].hasRanges());
2038   for (size_t i = 1; i < graph.numVirtualRegisters(); i++) {
2039     VirtualRegister& reg = vregs[i];
2040 
2041     if (mir->shouldCancel(
2042             "Backtracking Resolve Control Flow (vreg outer loop)")) {
2043       return false;
2044     }
2045 
2046     for (LiveRange::RegisterLinkIterator iter = reg.rangesBegin(); iter;) {
2047       LiveRange* range = LiveRange::get(*iter);
2048 
2049       if (mir->shouldCancel(
2050               "Backtracking Resolve Control Flow (vreg inner loop)")) {
2051         return false;
2052       }
2053 
2054       // Remove ranges which will never be used.
2055       if (deadRange(range)) {
2056         reg.removeRangeAndIncrement(iter);
2057         continue;
2058       }
2059 
2060       // The range which defines the register does not have a predecessor
2061       // to add moves from.
2062       if (range->hasDefinition()) {
2063         iter++;
2064         continue;
2065       }
2066 
2067       // Ignore ranges that start at block boundaries. We will handle
2068       // these in the next phase.
2069       CodePosition start = range->from();
2070       LNode* ins = insData[start];
2071       if (start == entryOf(ins->block())) {
2072         iter++;
2073         continue;
2074       }
2075 
2076       // If we already saw a range which covers the start of this range
2077       // and has the same allocation, we don't need an explicit move at
2078       // the start of this range.
2079       bool skip = false;
2080       for (LiveRange::RegisterLinkIterator prevIter = reg.rangesBegin();
2081            prevIter != iter; prevIter++) {
2082         LiveRange* prevRange = LiveRange::get(*prevIter);
2083         if (prevRange->covers(start) && prevRange->bundle()->allocation() ==
2084                                             range->bundle()->allocation()) {
2085           skip = true;
2086           break;
2087         }
2088       }
2089       if (skip) {
2090         iter++;
2091         continue;
2092       }
2093 
2094       if (!alloc().ensureBallast()) {
2095         return false;
2096       }
2097 
2098       LiveRange* predecessorRange =
2099           reg.rangeFor(start.previous(), /* preferRegister = */ true);
2100       if (start.subpos() == CodePosition::INPUT) {
2101         if (!moveInput(ins->toInstruction(), predecessorRange, range,
2102                        reg.type())) {
2103           return false;
2104         }
2105       } else {
2106         if (!moveAfter(ins->toInstruction(), predecessorRange, range,
2107                        reg.type())) {
2108           return false;
2109         }
2110       }
2111 
2112       iter++;
2113     }
2114   }
2115 
2116   JitSpew(JitSpew_RegAlloc, "Resolving control flow (block loop)");
2117 
2118   for (size_t i = 0; i < graph.numBlocks(); i++) {
2119     if (mir->shouldCancel("Backtracking Resolve Control Flow (block loop)")) {
2120       return false;
2121     }
2122 
2123     LBlock* successor = graph.getBlock(i);
2124     MBasicBlock* mSuccessor = successor->mir();
2125     if (mSuccessor->numPredecessors() < 1) {
2126       continue;
2127     }
2128 
2129     // Resolve phis to moves.
2130     for (size_t j = 0; j < successor->numPhis(); j++) {
2131       LPhi* phi = successor->getPhi(j);
2132       MOZ_ASSERT(phi->numDefs() == 1);
2133       LDefinition* def = phi->getDef(0);
2134       VirtualRegister& reg = vreg(def);
2135       LiveRange* to = reg.rangeFor(entryOf(successor));
2136       MOZ_ASSERT(to);
2137 
2138       for (size_t k = 0; k < mSuccessor->numPredecessors(); k++) {
2139         LBlock* predecessor = mSuccessor->getPredecessor(k)->lir();
2140         MOZ_ASSERT(predecessor->mir()->numSuccessors() == 1);
2141 
2142         LAllocation* input = phi->getOperand(k);
2143         LiveRange* from = vreg(input).rangeFor(exitOf(predecessor),
2144                                                /* preferRegister = */ true);
2145         MOZ_ASSERT(from);
2146 
2147         if (!alloc().ensureBallast()) {
2148           return false;
2149         }
2150 
2151         // Note: we have to use moveAtEdge both here and below (for edge
2152         // resolution) to avoid conflicting moves. See bug 1493900.
2153         if (!moveAtEdge(predecessor, successor, from, to, def->type())) {
2154           return false;
2155         }
2156       }
2157     }
2158   }
2159 
2160   // Add moves to resolve graph edges with different allocations at their
2161   // source and target.
2162   for (size_t i = 1; i < graph.numVirtualRegisters(); i++) {
2163     VirtualRegister& reg = vregs[i];
2164     for (LiveRange::RegisterLinkIterator iter = reg.rangesBegin(); iter;
2165          iter++) {
2166       LiveRange* targetRange = LiveRange::get(*iter);
2167 
2168       size_t firstBlockId = insData[targetRange->from()]->block()->mir()->id();
2169       if (!targetRange->covers(entryOf(graph.getBlock(firstBlockId)))) {
2170         firstBlockId++;
2171       }
2172       for (size_t id = firstBlockId; id < graph.numBlocks(); id++) {
2173         LBlock* successor = graph.getBlock(id);
2174         if (!targetRange->covers(entryOf(successor))) {
2175           break;
2176         }
2177 
2178         BitSet& live = liveIn[id];
2179         if (!live.contains(i)) {
2180           continue;
2181         }
2182 
2183         for (size_t j = 0; j < successor->mir()->numPredecessors(); j++) {
2184           LBlock* predecessor = successor->mir()->getPredecessor(j)->lir();
2185           if (targetRange->covers(exitOf(predecessor))) {
2186             continue;
2187           }
2188 
2189           if (!alloc().ensureBallast()) {
2190             return false;
2191           }
2192           LiveRange* from = reg.rangeFor(exitOf(predecessor), true);
2193           if (!moveAtEdge(predecessor, successor, from, targetRange,
2194                           reg.type())) {
2195             return false;
2196           }
2197         }
2198       }
2199     }
2200   }
2201 
2202   return true;
2203 }
2204 
2205 bool BacktrackingAllocator::isReusedInput(LUse* use, LNode* ins,
2206                                           bool considerCopy) {
2207   if (LDefinition* def = FindReusingDefOrTemp(ins, use)) {
2208     return considerCopy || !vregs[def->virtualRegister()].mustCopyInput();
2209   }
2210   return false;
2211 }
2212 
2213 bool BacktrackingAllocator::isRegisterUse(UsePosition* use, LNode* ins,
2214                                           bool considerCopy) {
2215   switch (use->usePolicy()) {
2216     case LUse::ANY:
2217       return isReusedInput(use->use(), ins, considerCopy);
2218 
2219     case LUse::REGISTER:
2220     case LUse::FIXED:
2221       return true;
2222 
2223     default:
2224       return false;
2225   }
2226 }
2227 
2228 bool BacktrackingAllocator::isRegisterDefinition(LiveRange* range) {
2229   if (!range->hasDefinition()) {
2230     return false;
2231   }
2232 
2233   VirtualRegister& reg = vregs[range->vreg()];
2234   if (reg.ins()->isPhi()) {
2235     return false;
2236   }
2237 
2238   if (reg.def()->policy() == LDefinition::FIXED &&
2239       !reg.def()->output()->isRegister()) {
2240     return false;
2241   }
2242 
2243   return true;
2244 }
2245 
2246 bool BacktrackingAllocator::reifyAllocations() {
2247   JitSpew(JitSpew_RegAlloc, "Reifying Allocations");
2248 
2249   MOZ_ASSERT(!vregs[0u].hasRanges());
2250   for (size_t i = 1; i < graph.numVirtualRegisters(); i++) {
2251     VirtualRegister& reg = vregs[i];
2252 
2253     if (mir->shouldCancel("Backtracking Reify Allocations (main loop)")) {
2254       return false;
2255     }
2256 
2257     for (LiveRange::RegisterLinkIterator iter = reg.rangesBegin(); iter;
2258          iter++) {
2259       LiveRange* range = LiveRange::get(*iter);
2260 
2261       if (range->hasDefinition()) {
2262         reg.def()->setOutput(range->bundle()->allocation());
2263         if (reg.ins()->recoversInput()) {
2264           LSnapshot* snapshot = reg.ins()->toInstruction()->snapshot();
2265           for (size_t i = 0; i < snapshot->numEntries(); i++) {
2266             LAllocation* entry = snapshot->getEntry(i);
2267             if (entry->isUse() &&
2268                 entry->toUse()->policy() == LUse::RECOVERED_INPUT) {
2269               *entry = *reg.def()->output();
2270             }
2271           }
2272         }
2273       }
2274 
2275       for (UsePositionIterator iter(range->usesBegin()); iter; iter++) {
2276         LAllocation* alloc = iter->use();
2277         *alloc = range->bundle()->allocation();
2278 
2279         // For any uses which feed into MUST_REUSE_INPUT definitions,
2280         // add copies if the use and def have different allocations.
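        // For instance, if the reused input ended up in one register but the
        // output was assigned a different one, a move from the input's
        // allocation to the output's allocation is added to a move group
        // ahead of the instruction and the use is rewritten to the output's
        // allocation.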
2281         LNode* ins = insData[iter->pos];
2282         if (LDefinition* def = FindReusingDefOrTemp(ins, alloc)) {
2283           LiveRange* outputRange = vreg(def).rangeFor(outputOf(ins));
2284           LAllocation res = outputRange->bundle()->allocation();
2285           LAllocation sourceAlloc = range->bundle()->allocation();
2286 
2287           if (res != *alloc) {
2288             if (!this->alloc().ensureBallast()) {
2289               return false;
2290             }
2291             if (NumReusingDefs(ins->toInstruction()) <= 1) {
2292               LMoveGroup* group = getInputMoveGroup(ins->toInstruction());
2293               if (!group->addAfter(sourceAlloc, res, reg.type())) {
2294                 return false;
2295               }
2296             } else {
2297               LMoveGroup* group = getFixReuseMoveGroup(ins->toInstruction());
2298               if (!group->add(sourceAlloc, res, reg.type())) {
2299                 return false;
2300               }
2301             }
2302             *alloc = res;
2303           }
2304         }
2305       }
2306 
2307       addLiveRegistersForRange(reg, range);
2308     }
2309   }
2310 
2311   graph.setLocalSlotCount(stackSlotAllocator.stackHeight());
2312   return true;
2313 }
2314 
2315 size_t BacktrackingAllocator::findFirstNonCallSafepoint(CodePosition from) {
2316   size_t i = 0;
2317   for (; i < graph.numNonCallSafepoints(); i++) {
2318     const LInstruction* ins = graph.getNonCallSafepoint(i);
2319     if (from <= inputOf(ins)) {
2320       break;
2321     }
2322   }
2323   return i;
2324 }
2325 
2326 void BacktrackingAllocator::addLiveRegistersForRange(VirtualRegister& reg,
2327                                                      LiveRange* range) {
2328   // Fill in the live register sets for all non-call safepoints.
2329   LAllocation a = range->bundle()->allocation();
2330   if (!a.isRegister()) {
2331     return;
2332   }
2333 
2334   // Don't add output registers to the safepoint.
2335   CodePosition start = range->from();
2336   if (range->hasDefinition() && !reg.isTemp()) {
2337 #ifdef CHECK_OSIPOINT_REGISTERS
2338     // We don't add the output register to the safepoint,
2339     // but it still might get added as one of the inputs.
2340     // So eagerly add this reg to the safepoint clobbered registers.
2341     if (reg.ins()->isInstruction()) {
2342       if (LSafepoint* safepoint = reg.ins()->toInstruction()->safepoint()) {
2343         safepoint->addClobberedRegister(a.toRegister());
2344       }
2345     }
2346 #endif
2347     start = start.next();
2348   }
2349 
2350   size_t i = findFirstNonCallSafepoint(start);
2351   for (; i < graph.numNonCallSafepoints(); i++) {
2352     LInstruction* ins = graph.getNonCallSafepoint(i);
2353     CodePosition pos = inputOf(ins);
2354 
2355     // Safepoints are sorted, so we can shortcut out of this loop
2356     // if we go out of range.
2357     if (range->to() <= pos) {
2358       break;
2359     }
2360 
2361     MOZ_ASSERT(range->covers(pos));
2362 
2363     LSafepoint* safepoint = ins->safepoint();
2364     safepoint->addLiveRegister(a.toRegister());
2365 
2366 #ifdef CHECK_OSIPOINT_REGISTERS
2367     if (reg.isTemp()) {
2368       safepoint->addClobberedRegister(a.toRegister());
2369     }
2370 #endif
2371   }
2372 }
2373 
2374 static inline bool IsNunbox(VirtualRegister& reg) {
2375 #ifdef JS_NUNBOX32
2376   return reg.type() == LDefinition::TYPE || reg.type() == LDefinition::PAYLOAD;
2377 #else
2378   return false;
2379 #endif
2380 }
2381 
2382 static inline bool IsSlotsOrElements(VirtualRegister& reg) {
2383   return reg.type() == LDefinition::SLOTS;
2384 }
2385 
2386 static inline bool IsTraceable(VirtualRegister& reg) {
2387   if (reg.type() == LDefinition::OBJECT) {
2388     return true;
2389   }
2390 #ifdef JS_PUNBOX64
2391   if (reg.type() == LDefinition::BOX) {
2392     return true;
2393   }
2394 #endif
2395   if (reg.type() == LDefinition::STACKRESULTS) {
2396     MOZ_ASSERT(reg.def());
2397     const LStackArea* alloc = reg.def()->output()->toStackArea();
2398     for (auto iter = alloc->results(); iter; iter.next()) {
2399       if (iter.isGcPointer()) {
2400         return true;
2401       }
2402     }
2403   }
2404   return false;
2405 }
2406 
2407 size_t BacktrackingAllocator::findFirstSafepoint(CodePosition pos,
2408                                                  size_t startFrom) {
2409   size_t i = startFrom;
2410   for (; i < graph.numSafepoints(); i++) {
2411     LInstruction* ins = graph.getSafepoint(i);
2412     if (pos <= inputOf(ins)) {
2413       break;
2414     }
2415   }
2416   return i;
2417 }
2418 
2419 bool BacktrackingAllocator::populateSafepoints() {
2420   JitSpew(JitSpew_RegAlloc, "Populating Safepoints");
2421 
2422   size_t firstSafepoint = 0;
2423 
2424   MOZ_ASSERT(!vregs[0u].def());
2425   for (uint32_t i = 1; i < graph.numVirtualRegisters(); i++) {
2426     VirtualRegister& reg = vregs[i];
2427 
2428     if (!reg.def() ||
2429         (!IsTraceable(reg) && !IsSlotsOrElements(reg) && !IsNunbox(reg))) {
2430       continue;
2431     }
2432 
2433     firstSafepoint = findFirstSafepoint(inputOf(reg.ins()), firstSafepoint);
2434     if (firstSafepoint >= graph.numSafepoints()) {
2435       break;
2436     }
2437 
2438     for (LiveRange::RegisterLinkIterator iter = reg.rangesBegin(); iter;
2439          iter++) {
2440       LiveRange* range = LiveRange::get(*iter);
2441 
2442       for (size_t j = firstSafepoint; j < graph.numSafepoints(); j++) {
2443         LInstruction* ins = graph.getSafepoint(j);
2444 
2445         if (!range->covers(inputOf(ins))) {
2446           if (inputOf(ins) >= range->to()) {
2447             break;
2448           }
2449           continue;
2450         }
2451 
2452         // Include temps but not instruction outputs. Also make sure
2453         // MUST_REUSE_INPUT is not used with gcthings or nunboxes, or
2454         // we would have to add the input reg to this safepoint.
2455         if (ins == reg.ins() && !reg.isTemp()) {
2456           DebugOnly<LDefinition*> def = reg.def();
2457           MOZ_ASSERT_IF(def->policy() == LDefinition::MUST_REUSE_INPUT,
2458                         def->type() == LDefinition::GENERAL ||
2459                             def->type() == LDefinition::INT32 ||
2460                             def->type() == LDefinition::FLOAT32 ||
2461                             def->type() == LDefinition::DOUBLE ||
2462                             def->type() == LDefinition::SIMD128);
2463           continue;
2464         }
2465 
2466         LSafepoint* safepoint = ins->safepoint();
2467 
2468         LAllocation a = range->bundle()->allocation();
2469         if (a.isGeneralReg() && ins->isCall()) {
2470           continue;
2471         }
2472 
2473         switch (reg.type()) {
2474           case LDefinition::OBJECT:
2475             if (!safepoint->addGcPointer(a)) {
2476               return false;
2477             }
2478             break;
2479           case LDefinition::SLOTS:
2480             if (!safepoint->addSlotsOrElementsPointer(a)) {
2481               return false;
2482             }
2483             break;
2484           case LDefinition::STACKRESULTS: {
2485             MOZ_ASSERT(a.isStackArea());
2486             for (auto iter = a.toStackArea()->results(); iter; iter.next()) {
2487               if (iter.isGcPointer()) {
2488                 if (!safepoint->addGcPointer(iter.alloc())) {
2489                   return false;
2490                 }
2491               }
2492             }
2493             break;
2494           }
2495 #ifdef JS_NUNBOX32
2496           case LDefinition::TYPE:
2497             if (!safepoint->addNunboxType(i, a)) {
2498               return false;
2499             }
2500             break;
2501           case LDefinition::PAYLOAD:
2502             if (!safepoint->addNunboxPayload(i, a)) {
2503               return false;
2504             }
2505             break;
2506 #else
2507           case LDefinition::BOX:
2508             if (!safepoint->addBoxedValue(a)) {
2509               return false;
2510             }
2511             break;
2512 #endif
2513           default:
2514             MOZ_CRASH("Bad register type");
2515         }
2516       }
2517     }
2518   }
2519 
2520   return true;
2521 }
2522 
2523 bool BacktrackingAllocator::annotateMoveGroups() {
2524   // Annotate move groups in the LIR graph with any register that is not
2525   // allocated at that point and can be used as a scratch register. This is
2526   // only required for x86, as other platforms always have scratch registers
2527   // available for use.
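  // (On x86 the move group resolver may need a temporary, e.g. to break up
  // memory-to-memory moves, and no register is reserved as a scratch, so a
  // free one has to be found here.)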
2528 #ifdef JS_CODEGEN_X86
2529   LiveRange* range =
2530       LiveRange::FallibleNew(alloc(), 0, CodePosition(), CodePosition().next());
2531   if (!range) {
2532     return false;
2533   }
2534 
2535   for (size_t i = 0; i < graph.numBlocks(); i++) {
2536     if (mir->shouldCancel("Backtracking Annotate Move Groups")) {
2537       return false;
2538     }
2539 
2540     LBlock* block = graph.getBlock(i);
2541     LInstruction* last = nullptr;
2542     for (LInstructionIterator iter = block->begin(); iter != block->end();
2543          ++iter) {
2544       if (iter->isMoveGroup()) {
2545         CodePosition from = last ? outputOf(last) : entryOf(block);
2546         range->setTo(from.next());
2547         range->setFrom(from);
2548 
2549         for (size_t i = 0; i < AnyRegister::Total; i++) {
2550           PhysicalRegister& reg = registers[i];
2551           if (reg.reg.isFloat() || !reg.allocatable) {
2552             continue;
2553           }
2554 
2555           // This register is unavailable for use if (a) it is in use
2556           // by some live range immediately before the move group,
2557           // or (b) it is an operand in one of the group's moves. The
2558           // latter case handles live ranges which end immediately
2559           // before the move group or start immediately after.
2560           // For (b) we need to consider move groups immediately
2561           // preceding or following this one.
2562 
2563           if (iter->toMoveGroup()->uses(reg.reg.gpr())) {
2564             continue;
2565           }
2566           bool found = false;
2567           LInstructionIterator niter(iter);
2568           for (niter++; niter != block->end(); niter++) {
2569             if (niter->isMoveGroup()) {
2570               if (niter->toMoveGroup()->uses(reg.reg.gpr())) {
2571                 found = true;
2572                 break;
2573               }
2574             } else {
2575               break;
2576             }
2577           }
2578           if (iter != block->begin()) {
2579             LInstructionIterator riter(iter);
2580             do {
2581               riter--;
2582               if (riter->isMoveGroup()) {
2583                 if (riter->toMoveGroup()->uses(reg.reg.gpr())) {
2584                   found = true;
2585                   break;
2586                 }
2587               } else {
2588                 break;
2589               }
2590             } while (riter != block->begin());
2591           }
2592 
2593           LiveRange* existing;
2594           if (found || reg.allocations.contains(range, &existing)) {
2595             continue;
2596           }
2597 
2598           iter->toMoveGroup()->setScratchRegister(reg.reg.gpr());
2599           break;
2600         }
2601       } else {
2602         last = *iter;
2603       }
2604     }
2605   }
2606 #endif
2607 
2608   return true;
2609 }
2610 
2611 /////////////////////////////////////////////////////////////////////
2612 // Debugging methods
2613 /////////////////////////////////////////////////////////////////////
2614 
2615 #ifdef JS_JITSPEW
2616 
2617 UniqueChars LiveRange::toString() const {
2618   AutoEnterOOMUnsafeRegion oomUnsafe;
2619 
2620   UniqueChars buf = JS_smprintf("v%u [%u,%u)", hasVreg() ? vreg() : 0,
2621                                 from().bits(), to().bits());
2622 
2623   if (buf && bundle() && !bundle()->allocation().isBogus()) {
2624     buf = JS_sprintf_append(std::move(buf), " %s",
2625                             bundle()->allocation().toString().get());
2626   }
2627 
2628   if (buf && hasDefinition()) {
2629     buf = JS_sprintf_append(std::move(buf), " (def)");
2630   }
2631 
2632   for (UsePositionIterator iter = usesBegin(); buf && iter; iter++) {
2633     buf = JS_sprintf_append(std::move(buf), " %s@%u",
2634                             iter->use()->toString().get(), iter->pos.bits());
2635   }
2636 
2637   if (!buf) {
2638     oomUnsafe.crash("LiveRange::toString()");
2639   }
2640 
2641   return buf;
2642 }
2643 
2644 UniqueChars LiveBundle::toString() const {
2645   AutoEnterOOMUnsafeRegion oomUnsafe;
2646 
2647   // Suppress -Wformat warning.
2648   UniqueChars buf = JS_smprintf("%s", "");
2649 
2650   for (LiveRange::BundleLinkIterator iter = rangesBegin(); buf && iter;
2651        iter++) {
2652     buf = JS_sprintf_append(std::move(buf), "%s %s",
2653                             (iter == rangesBegin()) ? "" : " ##",
2654                             LiveRange::get(*iter)->toString().get());
2655   }
2656 
2657   if (!buf) {
2658     oomUnsafe.crash("LiveBundle::toString()");
2659   }
2660 
2661   return buf;
2662 }
2663 
2664 #endif  // JS_JITSPEW
2665 
2666 void BacktrackingAllocator::dumpVregs(const char* who) {
2667 #ifdef JS_JITSPEW
2668   MOZ_ASSERT(!vregs[0u].hasRanges());
2669 
2670   JitSpewCont(JitSpew_RegAlloc, "\n");
2671   JitSpew(JitSpew_RegAlloc, "Live ranges by virtual register (%s):", who);
2672 
2673   for (uint32_t i = 1; i < graph.numVirtualRegisters(); i++) {
2674     JitSpewHeader(JitSpew_RegAlloc);
2675     JitSpewCont(JitSpew_RegAlloc, "  ");
2676     VirtualRegister& reg = vregs[i];
2677     for (LiveRange::RegisterLinkIterator iter = reg.rangesBegin(); iter;
2678          iter++) {
2679       if (iter != reg.rangesBegin()) {
2680         JitSpewCont(JitSpew_RegAlloc, " ## ");
2681       }
2682       JitSpewCont(JitSpew_RegAlloc, "%s",
2683                   LiveRange::get(*iter)->toString().get());
2684     }
2685     JitSpewCont(JitSpew_RegAlloc, "\n");
2686   }
2687 
2688   JitSpewCont(JitSpew_RegAlloc, "\n");
2689   JitSpew(JitSpew_RegAlloc, "Live ranges by bundle (%s):", who);
2690 
2691   for (uint32_t i = 1; i < graph.numVirtualRegisters(); i++) {
2692     VirtualRegister& reg = vregs[i];
2693     for (LiveRange::RegisterLinkIterator baseIter = reg.rangesBegin(); baseIter;
2694          baseIter++) {
2695       LiveRange* range = LiveRange::get(*baseIter);
2696       LiveBundle* bundle = range->bundle();
2697       if (range == bundle->firstRange()) {
2698         JitSpewHeader(JitSpew_RegAlloc);
2699         JitSpewCont(JitSpew_RegAlloc, "  ");
2700         for (LiveRange::BundleLinkIterator iter = bundle->rangesBegin(); iter;
2701              iter++) {
2702           if (iter != bundle->rangesBegin()) {
2703             JitSpewCont(JitSpew_RegAlloc, " ## ");
2704           }
2705           JitSpewCont(JitSpew_RegAlloc, "%s",
2706                       LiveRange::get(*iter)->toString().get());
2707         }
2708         JitSpewCont(JitSpew_RegAlloc, "\n");
2709       }
2710     }
2711   }
2712 #endif
2713 }
2714 
2715 #ifdef JS_JITSPEW
2716 struct BacktrackingAllocator::PrintLiveRange {
2717   bool& first_;
2718 
2719   explicit PrintLiveRange(bool& first) : first_(first) {}
2720 
2721   void operator()(const LiveRange* range) {
2722     if (first_) {
2723       first_ = false;
2724     } else {
2725       fprintf(stderr, " /");
2726     }
2727     fprintf(stderr, " %s", range->toString().get());
2728   }
2729 };
2730 #endif
2731 
2732 void BacktrackingAllocator::dumpAllocations() {
2733 #ifdef JS_JITSPEW
2734   JitSpew(JitSpew_RegAlloc, "Allocations:");
2735 
2736   dumpVregs("in dumpAllocations()");
2737 
2738   JitSpewCont(JitSpew_RegAlloc, "\n");
2739   JitSpew(JitSpew_RegAlloc, "Allocations by physical register:");
2740 
2741   for (size_t i = 0; i < AnyRegister::Total; i++) {
2742     if (registers[i].allocatable && !registers[i].allocations.empty()) {
2743       JitSpewHeader(JitSpew_RegAlloc);
2744       JitSpewCont(JitSpew_RegAlloc, "  %s:", AnyRegister::FromCode(i).name());
2745       bool first = true;
2746       registers[i].allocations.forEach(PrintLiveRange(first));
2747       JitSpewCont(JitSpew_RegAlloc, "\n");
2748     }
2749   }
2750 
2751   JitSpewCont(JitSpew_RegAlloc, "\n");
2752 #endif  // JS_JITSPEW
2753 }
2754 
2755 ///////////////////////////////////////////////////////////////////////////////
2756 // Heuristic Methods
2757 ///////////////////////////////////////////////////////////////////////////////
2758 
2759 size_t BacktrackingAllocator::computePriority(LiveBundle* bundle) {
2760   // The priority of a bundle is its total length, so that longer lived
2761   // bundles will be processed before shorter ones (even if the longer ones
2762   // have a low spill weight). See processBundle().
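  //
  // For example, a bundle with ranges [8,40) and [56,72) has priority
  // (40 - 8) + (72 - 56) = 48.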
2763   size_t lifetimeTotal = 0;
2764 
2765   for (LiveRange::BundleLinkIterator iter = bundle->rangesBegin(); iter;
2766        iter++) {
2767     LiveRange* range = LiveRange::get(*iter);
2768     lifetimeTotal += range->to() - range->from();
2769   }
2770 
2771   return lifetimeTotal;
2772 }
2773 
2774 bool BacktrackingAllocator::minimalDef(LiveRange* range, LNode* ins) {
2775   // Whether this is a minimal range capturing a definition at ins.
2776   return (range->to() <= minimalDefEnd(ins).next()) &&
2777          ((!ins->isPhi() && range->from() == inputOf(ins)) ||
2778           range->from() == outputOf(ins));
2779 }
2780 
2781 bool BacktrackingAllocator::minimalUse(LiveRange* range, UsePosition* use) {
2782   // Whether this is a minimal range capturing |use|.
2783   LNode* ins = insData[use->pos];
2784   return (range->from() == inputOf(ins)) &&
2785          (range->to() ==
2786           (use->use()->usedAtStart() ? outputOf(ins) : outputOf(ins).next()));
2787 }
2788 
2789 bool BacktrackingAllocator::minimalBundle(LiveBundle* bundle, bool* pfixed) {
2790   LiveRange::BundleLinkIterator iter = bundle->rangesBegin();
2791   LiveRange* range = LiveRange::get(*iter);
2792 
2793   if (!range->hasVreg()) {
2794     *pfixed = true;
2795     return true;
2796   }
2797 
2798   // If a bundle contains multiple ranges, splitAtAllRegisterUses will split
2799   // each range into a separate bundle.
2800   if (++iter) {
2801     return false;
2802   }
2803 
2804   if (range->hasDefinition()) {
2805     VirtualRegister& reg = vregs[range->vreg()];
2806     if (pfixed) {
2807       *pfixed = reg.def()->policy() == LDefinition::FIXED &&
2808                 reg.def()->output()->isRegister();
2809     }
2810     return minimalDef(range, reg.ins());
2811   }
2812 
2813   bool fixed = false, minimal = false, multiple = false;
2814 
2815   for (UsePositionIterator iter = range->usesBegin(); iter; iter++) {
2816     if (iter != range->usesBegin()) {
2817       multiple = true;
2818     }
2819 
2820     switch (iter->usePolicy()) {
2821       case LUse::FIXED:
2822         if (fixed) {
2823           return false;
2824         }
2825         fixed = true;
2826         if (minimalUse(range, *iter)) {
2827           minimal = true;
2828         }
2829         break;
2830 
2831       case LUse::REGISTER:
2832         if (minimalUse(range, *iter)) {
2833           minimal = true;
2834         }
2835         break;
2836 
2837       default:
2838         break;
2839     }
2840   }
2841 
2842   // If a range contains a fixed use and at least one other use,
2843   // splitAtAllRegisterUses will split each use into a different bundle.
2844   if (multiple && fixed) {
2845     minimal = false;
2846   }
2847 
2848   if (pfixed) {
2849     *pfixed = fixed;
2850   }
2851   return minimal;
2852 }
2853 
2854 size_t BacktrackingAllocator::computeSpillWeight(LiveBundle* bundle) {
2855   // Minimal bundles have an extremely high spill weight, to ensure they
2856   // can evict any other bundles and be allocated to a register.
2857   bool fixed;
2858   if (minimalBundle(bundle, &fixed)) {
2859     return fixed ? 2000000 : 1000000;
2860   }
2861 
2862   size_t usesTotal = 0;
2863   fixed = false;
2864 
2865   for (LiveRange::BundleLinkIterator iter = bundle->rangesBegin(); iter;
2866        iter++) {
2867     LiveRange* range = LiveRange::get(*iter);
2868 
2869     if (range->hasDefinition()) {
2870       VirtualRegister& reg = vregs[range->vreg()];
2871       if (reg.def()->policy() == LDefinition::FIXED &&
2872           reg.def()->output()->isRegister()) {
2873         usesTotal += 2000;
2874         fixed = true;
2875       } else if (!reg.ins()->isPhi()) {
2876         usesTotal += 2000;
2877       }
2878     }
2879 
2880     usesTotal += range->usesSpillWeight();
2881     if (range->numFixedUses() > 0) {
2882       fixed = true;
2883     }
2884   }
2885 
2886   // Bundles with fixed uses are given a higher spill weight, since they must
2887   // be allocated to a specific register.
2888   if (testbed && fixed) {
2889     usesTotal *= 2;
2890   }
2891 
2892   // Compute spill weight as a use density, lowering the weight for long
2893   // lived bundles with relatively few uses.
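  // For example, a bundle with a non-phi register definition (2000) and uses
  // totalling 1000, spread over a lifetime of 300 code positions, gets a
  // spill weight of 3000 / 300 = 10.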
2894   size_t lifetimeTotal = computePriority(bundle);
2895   return lifetimeTotal ? usesTotal / lifetimeTotal : 0;
2896 }
2897 
2898 size_t BacktrackingAllocator::maximumSpillWeight(
2899     const LiveBundleVector& bundles) {
2900   size_t maxWeight = 0;
2901   for (size_t i = 0; i < bundles.length(); i++) {
2902     maxWeight = std::max(maxWeight, computeSpillWeight(bundles[i]));
2903   }
2904   return maxWeight;
2905 }
2906 
2907 bool BacktrackingAllocator::trySplitAcrossHotcode(LiveBundle* bundle,
2908                                                   bool* success) {
2909   // If this bundle has portions that are hot and portions that are cold,
2910   // split it at the boundaries between hot and cold code.
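  // The result is a bundle covering the hot range plus bundles for any cold
  // code before and/or after it (or, in the testbed configuration, a single
  // cold bundle), all of which are requeued for allocation.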
2911 
2912   LiveRange* hotRange = nullptr;
2913 
2914   for (LiveRange::BundleLinkIterator iter = bundle->rangesBegin(); iter;
2915        iter++) {
2916     LiveRange* range = LiveRange::get(*iter);
2917     if (hotcode.contains(range, &hotRange)) {
2918       break;
2919     }
2920   }
2921 
2922   // Don't split if there is no hot code in the bundle.
2923   if (!hotRange) {
2924     JitSpew(JitSpew_RegAlloc, "  bundle does not contain hot code");
2925     return true;
2926   }
2927 
2928   // Don't split if there is no cold code in the bundle.
2929   bool coldCode = false;
2930   for (LiveRange::BundleLinkIterator iter = bundle->rangesBegin(); iter;
2931        iter++) {
2932     LiveRange* range = LiveRange::get(*iter);
2933     if (!hotRange->contains(range)) {
2934       coldCode = true;
2935       break;
2936     }
2937   }
2938   if (!coldCode) {
2939     JitSpew(JitSpew_RegAlloc, "  bundle does not contain cold code");
2940     return true;
2941   }
2942 
2943   JitSpewIfEnabled(JitSpew_RegAlloc, "  split across hot range %s",
2944                    hotRange->toString().get());
2945 
2946   // Tweak the splitting method when compiling wasm code to look at actual
2947   // uses within the hot/cold code. This heuristic is in place because the
2948   // mechanism below regresses several asm.js tests. Hopefully this will be
2949   // fixed soon and this special case removed. See bug 948838.
2950   if (compilingWasm()) {
2951     SplitPositionVector splitPositions;
2952     if (!splitPositions.append(hotRange->from()) ||
2953         !splitPositions.append(hotRange->to())) {
2954       return false;
2955     }
2956     *success = true;
2957     return splitAt(bundle, splitPositions);
2958   }
2959 
2960   LiveBundle* hotBundle = LiveBundle::FallibleNew(alloc(), bundle->spillSet(),
2961                                                   bundle->spillParent());
2962   if (!hotBundle) {
2963     return false;
2964   }
2965   LiveBundle* preBundle = nullptr;
2966   LiveBundle* postBundle = nullptr;
2967   LiveBundle* coldBundle = nullptr;
2968 
2969   if (testbed) {
2970     coldBundle = LiveBundle::FallibleNew(alloc(), bundle->spillSet(),
2971                                          bundle->spillParent());
2972     if (!coldBundle) {
2973       return false;
2974     }
2975   }
2976 
2977   // Accumulate the ranges of hot and cold code in the bundle. Note that
2978   // we are only comparing with the single hot range found, so the cold code
2979   // may contain separate hot ranges.
  for (LiveRange::BundleLinkIterator iter = bundle->rangesBegin(); iter;
       iter++) {
    LiveRange* range = LiveRange::get(*iter);
    LiveRange::Range hot, coldPre, coldPost;
    range->intersect(hotRange, &coldPre, &hot, &coldPost);

    if (!hot.empty()) {
      if (!hotBundle->addRangeAndDistributeUses(alloc(), range, hot.from,
                                                hot.to)) {
        return false;
      }
    }

    if (!coldPre.empty()) {
      if (testbed) {
        if (!coldBundle->addRangeAndDistributeUses(alloc(), range, coldPre.from,
                                                   coldPre.to)) {
          return false;
        }
      } else {
        if (!preBundle) {
          preBundle = LiveBundle::FallibleNew(alloc(), bundle->spillSet(),
                                              bundle->spillParent());
          if (!preBundle) {
            return false;
          }
        }
        if (!preBundle->addRangeAndDistributeUses(alloc(), range, coldPre.from,
                                                  coldPre.to)) {
          return false;
        }
      }
    }

    if (!coldPost.empty()) {
      if (testbed) {
        if (!coldBundle->addRangeAndDistributeUses(
                alloc(), range, coldPost.from, coldPost.to)) {
          return false;
        }
      } else {
        if (!postBundle) {
          postBundle = LiveBundle::FallibleNew(alloc(), bundle->spillSet(),
                                               bundle->spillParent());
          if (!postBundle) {
            return false;
          }
        }
        if (!postBundle->addRangeAndDistributeUses(
                alloc(), range, coldPost.from, coldPost.to)) {
          return false;
        }
      }
    }
  }

  MOZ_ASSERT(hotBundle->numRanges() != 0);

  LiveBundleVector newBundles;
  if (!newBundles.append(hotBundle)) {
    return false;
  }

  if (testbed) {
    MOZ_ASSERT(coldBundle->numRanges() != 0);
    if (!newBundles.append(coldBundle)) {
      return false;
    }
  } else {
    MOZ_ASSERT(preBundle || postBundle);
    if (preBundle && !newBundles.append(preBundle)) {
      return false;
    }
    if (postBundle && !newBundles.append(postBundle)) {
      return false;
    }
  }

  *success = true;
  return splitAndRequeueBundles(bundle, newBundles);
}

bool BacktrackingAllocator::trySplitAfterLastRegisterUse(LiveBundle* bundle,
                                                         LiveBundle* conflict,
                                                         bool* success) {
  // If this bundle's later uses do not require it to be in a register,
  // split it after the last use which does require a register. If conflict
  // is specified, only consider register uses before the conflict starts.

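  // Illustrative example (invented positions): if the bundle's register uses
  // end at position 20 and only non-register uses follow, splitting just
  // after 20 lets the tail of the bundle live on the stack.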
  CodePosition lastRegisterFrom, lastRegisterTo, lastUse;

  for (LiveRange::BundleLinkIterator iter = bundle->rangesBegin(); iter;
       iter++) {
    LiveRange* range = LiveRange::get(*iter);

    // If the range defines a register, consider that a register use for
    // our purposes here.
    if (isRegisterDefinition(range)) {
      CodePosition spillStart = minimalDefEnd(insData[range->from()]).next();
      if (!conflict || spillStart < conflict->firstRange()->from()) {
        lastUse = lastRegisterFrom = range->from();
        lastRegisterTo = spillStart;
      }
    }

    for (UsePositionIterator iter(range->usesBegin()); iter; iter++) {
      LNode* ins = insData[iter->pos];

      // Uses in the bundle should be sorted.
      MOZ_ASSERT(iter->pos >= lastUse);
      lastUse = inputOf(ins);

      if (!conflict || outputOf(ins) < conflict->firstRange()->from()) {
        if (isRegisterUse(*iter, ins, /* considerCopy = */ true)) {
          lastRegisterFrom = inputOf(ins);
          lastRegisterTo = iter->pos.next();
        }
      }
    }
  }

  // Can't trim non-register uses off the end by splitting.
  if (!lastRegisterFrom.bits()) {
    JitSpew(JitSpew_RegAlloc, "  bundle has no register uses");
    return true;
  }
  if (lastUse < lastRegisterTo) {
    JitSpew(JitSpew_RegAlloc, "  bundle's last use is a register use");
    return true;
  }

  JitSpewIfEnabled(JitSpew_RegAlloc, "  split after last register use at %u",
                   lastRegisterTo.bits());

  SplitPositionVector splitPositions;
  if (!splitPositions.append(lastRegisterTo)) {
    return false;
  }
  *success = true;
  return splitAt(bundle, splitPositions);
}

bool BacktrackingAllocator::trySplitBeforeFirstRegisterUse(LiveBundle* bundle,
                                                           LiveBundle* conflict,
                                                           bool* success) {
  // If this bundle's earlier uses do not require it to be in a register,
  // split it before the first use which does require a register. If conflict
  // is specified, only consider register uses after the conflict ends.

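  // Illustrative example (invented positions): if the bundle starts at
  // position 5 but its first register use is at position 18, splitting at 18
  // lets the head of the bundle stay on the stack while only the tail
  // competes for a register.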
  if (isRegisterDefinition(bundle->firstRange())) {
    JitSpew(JitSpew_RegAlloc, "  bundle is defined by a register");
    return true;
  }
  if (!bundle->firstRange()->hasDefinition()) {
    JitSpew(JitSpew_RegAlloc, "  bundle does not have definition");
    return true;
  }

  CodePosition firstRegisterFrom;

  CodePosition conflictEnd;
  if (conflict) {
    for (LiveRange::BundleLinkIterator iter = conflict->rangesBegin(); iter;
         iter++) {
      LiveRange* range = LiveRange::get(*iter);
      if (range->to() > conflictEnd) {
        conflictEnd = range->to();
      }
    }
  }

  for (LiveRange::BundleLinkIterator iter = bundle->rangesBegin(); iter;
       iter++) {
    LiveRange* range = LiveRange::get(*iter);

    if (!conflict || range->from() > conflictEnd) {
      if (range->hasDefinition() && isRegisterDefinition(range)) {
        firstRegisterFrom = range->from();
        break;
      }
    }

    for (UsePositionIterator iter(range->usesBegin()); iter; iter++) {
      LNode* ins = insData[iter->pos];

      if (!conflict || outputOf(ins) >= conflictEnd) {
        if (isRegisterUse(*iter, ins, /* considerCopy = */ true)) {
          firstRegisterFrom = inputOf(ins);
          break;
        }
      }
    }
    if (firstRegisterFrom.bits()) {
      break;
    }
  }

  if (!firstRegisterFrom.bits()) {
    // Can't trim non-register uses off the beginning by splitting.
    JitSpew(JitSpew_RegAlloc, "  bundle has no register uses");
    return true;
  }

  JitSpewIfEnabled(JitSpew_RegAlloc, "  split before first register use at %u",
                   firstRegisterFrom.bits());

  SplitPositionVector splitPositions;
  if (!splitPositions.append(firstRegisterFrom)) {
    return false;
  }
  *success = true;
  return splitAt(bundle, splitPositions);
}

// When splitting a bundle according to a list of split positions, return
// whether a use or range at |pos| should use a different bundle than the last
// position this was called for.
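// Illustrative example (invented positions): with splitPositions = {10, 30},
// successive calls at positions 5, 12, 20 and 35 return false, true, false
// and true respectively: a new bundle starts whenever a split position has
// been crossed since the previous call.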
static bool UseNewBundle(const SplitPositionVector& splitPositions,
                         CodePosition pos, size_t* activeSplitPosition) {
  if (splitPositions.empty()) {
    // When the split positions are empty we are splitting at all uses.
    return true;
  }

  if (*activeSplitPosition == splitPositions.length()) {
    // We've advanced past all split positions.
    return false;
  }

  if (splitPositions[*activeSplitPosition] > pos) {
    // We haven't gotten to the next split position yet.
    return false;
  }

  // We've advanced past the next split position; find the next position at
  // which we should split.
  while (*activeSplitPosition < splitPositions.length() &&
         splitPositions[*activeSplitPosition] <= pos) {
    (*activeSplitPosition)++;
  }
  return true;
}

static bool HasPrecedingRangeSharingVreg(LiveBundle* bundle, LiveRange* range) {
  MOZ_ASSERT(range->bundle() == bundle);

  for (LiveRange::BundleLinkIterator iter = bundle->rangesBegin(); iter;
       iter++) {
    LiveRange* prevRange = LiveRange::get(*iter);
    if (prevRange == range) {
      return false;
    }
    if (prevRange->vreg() == range->vreg()) {
      return true;
    }
  }

  MOZ_CRASH();
}

static bool HasFollowingRangeSharingVreg(LiveBundle* bundle, LiveRange* range) {
  MOZ_ASSERT(range->bundle() == bundle);

  bool foundRange = false;
  for (LiveRange::BundleLinkIterator iter = bundle->rangesBegin(); iter;
       iter++) {
    LiveRange* prevRange = LiveRange::get(*iter);
    if (foundRange && prevRange->vreg() == range->vreg()) {
      return true;
    }
    if (prevRange == range) {
      foundRange = true;
    }
  }

  MOZ_ASSERT(foundRange);
  return false;
}

bool BacktrackingAllocator::splitAt(LiveBundle* bundle,
                                    const SplitPositionVector& splitPositions) {
  // Split the bundle at the given split points. Register uses which have no
  // intervening split points are consolidated into the same bundle. If the
  // list of split points is empty, then all register uses are placed in
  // minimal bundles.
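  // Rough sketch of the steps below (not a substitute for the code): first
  // make sure a spill bundle exists that covers everything after each
  // definition, then walk the ranges in order, starting a fresh output bundle
  // whenever UseNewBundle reports that a split position has been crossed, and
  // finally trim the new bundles' ranges down to their remaining uses.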

  // splitPositions should be sorted.
  for (size_t i = 1; i < splitPositions.length(); ++i) {
    MOZ_ASSERT(splitPositions[i - 1] < splitPositions[i]);
  }

  // We don't need to create a new spill bundle if there already is one.
  bool spillBundleIsNew = false;
  LiveBundle* spillBundle = bundle->spillParent();
  if (!spillBundle) {
    spillBundle = LiveBundle::FallibleNew(alloc(), bundle->spillSet(), nullptr);
    if (!spillBundle) {
      return false;
    }
    spillBundleIsNew = true;

    for (LiveRange::BundleLinkIterator iter = bundle->rangesBegin(); iter;
         iter++) {
      LiveRange* range = LiveRange::get(*iter);

      CodePosition from = range->from();
      if (isRegisterDefinition(range)) {
        from = minimalDefEnd(insData[from]).next();
      }

      if (from < range->to()) {
        if (!spillBundle->addRange(alloc(), range->vreg(), from, range->to())) {
          return false;
        }

        if (range->hasDefinition() && !isRegisterDefinition(range)) {
          spillBundle->lastRange()->setHasDefinition();
        }
      }
    }
  }

  LiveBundleVector newBundles;

  // The bundle to which ranges are currently being added.
  LiveBundle* activeBundle =
      LiveBundle::FallibleNew(alloc(), bundle->spillSet(), spillBundle);
  if (!activeBundle || !newBundles.append(activeBundle)) {
    return false;
  }

  // State for use by UseNewBundle.
  size_t activeSplitPosition = 0;

  // Make new bundles according to the split positions, and distribute ranges
  // and uses to them.
  for (LiveRange::BundleLinkIterator iter = bundle->rangesBegin(); iter;
       iter++) {
    LiveRange* range = LiveRange::get(*iter);

    if (UseNewBundle(splitPositions, range->from(), &activeSplitPosition)) {
      activeBundle =
          LiveBundle::FallibleNew(alloc(), bundle->spillSet(), spillBundle);
      if (!activeBundle || !newBundles.append(activeBundle)) {
        return false;
      }
    }

    LiveRange* activeRange = LiveRange::FallibleNew(alloc(), range->vreg(),
                                                    range->from(), range->to());
    if (!activeRange) {
      return false;
    }
    activeBundle->addRange(activeRange);

    if (isRegisterDefinition(range)) {
      activeRange->setHasDefinition();
    }

    while (range->hasUses()) {
      UsePosition* use = range->popUse();
      LNode* ins = insData[use->pos];

      // Any uses of a register that appear before its definition has
      // finished must be associated with the range for that definition.
      if (isRegisterDefinition(range) &&
          use->pos <= minimalDefEnd(insData[range->from()])) {
        activeRange->addUse(use);
      } else if (isRegisterUse(use, ins)) {
        // Place this register use into a different bundle from the
        // last one if there are any split points between the two uses.
        // UseNewBundle always returns true if we are splitting at all
        // register uses, but we can still reuse the last range and
        // bundle if they have uses at the same position, except when
        // either use is fixed (the two uses might require incompatible
        // registers).
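        // For illustration: two uses with a plain register policy at the
        // same position can share one minimal bundle, but two FIXED uses at
        // the same position might name different registers, so each gets
        // its own bundle.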
        if (UseNewBundle(splitPositions, use->pos, &activeSplitPosition) &&
            (!activeRange->hasUses() ||
             activeRange->usesBegin()->pos != use->pos ||
             activeRange->usesBegin()->usePolicy() == LUse::FIXED ||
             use->usePolicy() == LUse::FIXED)) {
          activeBundle =
              LiveBundle::FallibleNew(alloc(), bundle->spillSet(), spillBundle);
          if (!activeBundle || !newBundles.append(activeBundle)) {
            return false;
          }
          activeRange = LiveRange::FallibleNew(alloc(), range->vreg(),
                                               range->from(), range->to());
          if (!activeRange) {
            return false;
          }
          activeBundle->addRange(activeRange);
        }

        activeRange->addUse(use);
      } else {
        MOZ_ASSERT(spillBundleIsNew);
        spillBundle->rangeFor(use->pos)->addUse(use);
      }
    }
  }

  LiveBundleVector filteredBundles;

  // Trim the ends of ranges in each new bundle when there are no other
  // earlier or later ranges in the same bundle with the same vreg.
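  // Illustrative example (invented positions): if a new bundle's only range
  // for some vreg covers [10,50) but its last remaining use is at position
  // 22, the range is shrunk to end just after 22; a range left with no uses
  // and no definition is dropped from the bundle entirely.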
  for (size_t i = 0; i < newBundles.length(); i++) {
    LiveBundle* bundle = newBundles[i];

    for (LiveRange::BundleLinkIterator iter = bundle->rangesBegin(); iter;) {
      LiveRange* range = LiveRange::get(*iter);

      if (!range->hasDefinition()) {
        if (!HasPrecedingRangeSharingVreg(bundle, range)) {
          if (range->hasUses()) {
            UsePosition* use = *range->usesBegin();
            range->setFrom(inputOf(insData[use->pos]));
          } else {
            bundle->removeRangeAndIncrementIterator(iter);
            continue;
          }
        }
      }

      if (!HasFollowingRangeSharingVreg(bundle, range)) {
        if (range->hasUses()) {
          UsePosition* use = range->lastUse();
          range->setTo(use->pos.next());
        } else if (range->hasDefinition()) {
          range->setTo(minimalDefEnd(insData[range->from()]).next());
        } else {
          bundle->removeRangeAndIncrementIterator(iter);
          continue;
        }
      }

      iter++;
    }

    if (bundle->hasRanges() && !filteredBundles.append(bundle)) {
      return false;
    }
  }

  if (spillBundleIsNew && !filteredBundles.append(spillBundle)) {
    return false;
  }

  return splitAndRequeueBundles(bundle, filteredBundles);
}

bool BacktrackingAllocator::splitAcrossCalls(LiveBundle* bundle) {
  // Split the bundle to separate register uses and non-register uses and
  // allow the vreg to be spilled across its range.

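  // Illustrative example (invented positions): for a range [10,60) containing
  // calls at positions 25 and 40, the bundle is split at 25 and 40 so that
  // the pieces live across each call can be spilled rather than kept in a
  // register the call would clobber.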
  // Find the locations of all calls in the bundle's range.
  SplitPositionVector callPositions;
  for (LiveRange::BundleLinkIterator iter = bundle->rangesBegin(); iter;
       iter++) {
    LiveRange* range = LiveRange::get(*iter);
    CallRange searchRange(range->from(), range->to());
    CallRange* callRange;
    if (!callRanges.contains(&searchRange, &callRange)) {
      // There are no calls inside this range.
      continue;
    }
    MOZ_ASSERT(range->covers(callRange->range.from));

    // The search above returns an arbitrary call within the range. Walk
    // backwards to find the first call in the range.
    for (CallRangeList::reverse_iterator riter =
             callRangesList.rbegin(callRange);
         riter != callRangesList.rend(); ++riter) {
      CodePosition pos = riter->range.from;
      if (range->covers(pos)) {
        callRange = *riter;
      } else {
        break;
      }
    }

    // Add all call positions within the range, by walking forwards.
    for (CallRangeList::iterator iter = callRangesList.begin(callRange);
         iter != callRangesList.end(); ++iter) {
      CodePosition pos = iter->range.from;
      if (!range->covers(pos)) {
        break;
      }

      // Calls at the beginning of the range are ignored; there is no splitting
      // to do.
      if (range->covers(pos.previous())) {
        MOZ_ASSERT_IF(callPositions.length(), pos > callPositions.back());
        if (!callPositions.append(pos)) {
          return false;
        }
      }
    }
  }
  MOZ_ASSERT(callPositions.length());

#ifdef JS_JITSPEW
  JitSpewStart(JitSpew_RegAlloc, "  split across calls at ");
  for (size_t i = 0; i < callPositions.length(); ++i) {
    JitSpewCont(JitSpew_RegAlloc, "%s%u", i != 0 ? ", " : "",
                callPositions[i].bits());
  }
  JitSpewFin(JitSpew_RegAlloc);
#endif

  return splitAt(bundle, callPositions);
}

bool BacktrackingAllocator::chooseBundleSplit(LiveBundle* bundle, bool fixed,
                                              LiveBundle* conflict) {
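  // The strategies below are tried in order: split across hot code, then
  // (when |fixed| is set) across calls, then before the first or after the
  // last register use, and finally at every register use.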
  bool success = false;

  if (!trySplitAcrossHotcode(bundle, &success)) {
    return false;
  }
  if (success) {
    return true;
  }

  if (fixed) {
    return splitAcrossCalls(bundle);
  }

  if (!trySplitBeforeFirstRegisterUse(bundle, conflict, &success)) {
    return false;
  }
  if (success) {
    return true;
  }

  if (!trySplitAfterLastRegisterUse(bundle, conflict, &success)) {
    return false;
  }
  if (success) {
    return true;
  }

  // Split at all register uses.
  SplitPositionVector emptyPositions;
  return splitAt(bundle, emptyPositions);
}
