1 /* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
2  * vim: set ts=8 sts=2 et sw=2 tw=80:
3  * This Source Code Form is subject to the terms of the Mozilla Public
4  * License, v. 2.0. If a copy of the MPL was not distributed with this
5  * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
6 
7 #include "jit/BacktrackingAllocator.h"
8 
9 #include <algorithm>
10 
11 #include "jit/BitSet.h"
12 #include "jit/CompileInfo.h"
13 #include "js/Printf.h"
14 
15 using namespace js;
16 using namespace js::jit;
17 
18 using mozilla::DebugOnly;
19 
20 /////////////////////////////////////////////////////////////////////
21 // Utility
22 /////////////////////////////////////////////////////////////////////
23 
24 static inline bool SortBefore(UsePosition* a, UsePosition* b) {
25   return a->pos <= b->pos;
26 }
27 
28 static inline bool SortBefore(LiveRange::BundleLink* a,
29                               LiveRange::BundleLink* b) {
30   LiveRange* rangea = LiveRange::get(a);
31   LiveRange* rangeb = LiveRange::get(b);
32   MOZ_ASSERT(!rangea->intersects(rangeb));
33   return rangea->from() < rangeb->from();
34 }
35 
36 static inline bool SortBefore(LiveRange::RegisterLink* a,
37                               LiveRange::RegisterLink* b) {
38   return LiveRange::get(a)->from() <= LiveRange::get(b)->from();
39 }
40 
41 template <typename T>
42 static inline void InsertSortedList(InlineForwardList<T>& list, T* value) {
43   if (list.empty()) {
44     list.pushFront(value);
45     return;
46   }
47 
48   if (SortBefore(list.back(), value)) {
49     list.pushBack(value);
50     return;
51   }
52 
53   T* prev = nullptr;
54   for (InlineForwardListIterator<T> iter = list.begin(); iter; iter++) {
55     if (SortBefore(value, *iter)) {
56       break;
57     }
58     prev = *iter;
59   }
60 
61   if (prev) {
62     list.insertAfter(prev, value);
63   } else {
64     list.pushFront(value);
65   }
66 }
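// Illustrative example (added comment, not in the original source): given a
// use list whose entries are at positions 5 and 20, inserting a use at
// position 12 takes neither fast path above (the list is not empty and 12
// does not sort after the back entry at 20), so the loop walks the list and
// inserts the new use after the entry at 5, yielding the order 5, 12, 20.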
67 
68 static bool CanMergeTypesInBundle(LDefinition::Type a, LDefinition::Type b) {
69   // Fast path for the common case.
70   if (a == b) {
71     return true;
72   }
73 
74   // Only merge if the sizes match, so that we don't get confused about the
75   // width of spill slots.
76   return StackSlotAllocator::width(a) == StackSlotAllocator::width(b);
77 }
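// Added note (illustrative, not in the original source): the width check
// above means two definitions of different LDefinition::Type can still share
// a bundle as long as StackSlotAllocator::width reports the same spill slot
// size for both, e.g. two distinct 8-byte types; types whose slots differ in
// size are kept in separate bundles so a spill slot is never reinterpreted at
// the wrong width.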
78 
79 /////////////////////////////////////////////////////////////////////
80 // LiveRange
81 /////////////////////////////////////////////////////////////////////
82 
83 inline void LiveRange::noteAddedUse(UsePosition* use) {
84   LUse::Policy policy = use->usePolicy();
85   usesSpillWeight_ += BacktrackingAllocator::SpillWeightFromUsePolicy(policy);
86   if (policy == LUse::FIXED) {
87     ++numFixedUses_;
88   }
89 }
90 
91 inline void LiveRange::noteRemovedUse(UsePosition* use) {
92   LUse::Policy policy = use->usePolicy();
93   usesSpillWeight_ -= BacktrackingAllocator::SpillWeightFromUsePolicy(policy);
94   if (policy == LUse::FIXED) {
95     --numFixedUses_;
96   }
97   MOZ_ASSERT_IF(!hasUses(), !usesSpillWeight_ && !numFixedUses_);
98 }
99 
100 void LiveRange::addUse(UsePosition* use) {
101   MOZ_ASSERT(covers(use->pos));
102   InsertSortedList(uses_, use);
103   noteAddedUse(use);
104 }
105 
106 UsePosition* LiveRange::popUse() {
107   UsePosition* ret = uses_.popFront();
108   noteRemovedUse(ret);
109   return ret;
110 }
111 
112 void LiveRange::distributeUses(LiveRange* other) {
113   MOZ_ASSERT(&other->vreg() == &vreg());
114   MOZ_ASSERT(this != other);
115 
116   // Move over all uses which fit in |other|'s boundaries.
117   for (UsePositionIterator iter = usesBegin(); iter;) {
118     UsePosition* use = *iter;
119     if (other->covers(use->pos)) {
120       uses_.removeAndIncrement(iter);
121       noteRemovedUse(use);
122       other->addUse(use);
123     } else {
124       iter++;
125     }
126   }
127 
128   // Distribute the definition to |other| as well, if possible.
129   if (hasDefinition() && from() == other->from()) {
130     other->setHasDefinition();
131   }
132 }
133 
134 bool LiveRange::contains(LiveRange* other) const {
135   return from() <= other->from() && to() >= other->to();
136 }
137 
138 void LiveRange::intersect(LiveRange* other, Range* pre, Range* inside,
139                           Range* post) const {
140   MOZ_ASSERT(pre->empty() && inside->empty() && post->empty());
141 
142   CodePosition innerFrom = from();
143   if (from() < other->from()) {
144     if (to() < other->from()) {
145       *pre = range_;
146       return;
147     }
148     *pre = Range(from(), other->from());
149     innerFrom = other->from();
150   }
151 
152   CodePosition innerTo = to();
153   if (to() > other->to()) {
154     if (from() >= other->to()) {
155       *post = range_;
156       return;
157     }
158     *post = Range(other->to(), to());
159     innerTo = other->to();
160   }
161 
162   if (innerFrom != innerTo) {
163     *inside = Range(innerFrom, innerTo);
164   }
165 }
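// Worked example (added comment): for this == [10, 30) and other == [20, 40),
// intersect() produces pre == [10, 20), inside == [20, 30) and leaves post
// empty; if the two ranges were disjoint, only pre or only post would be set
// (to the whole of this range) by the early returns above.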
166 
167 bool LiveRange::intersects(LiveRange* other) const {
168   Range pre, inside, post;
169   intersect(other, &pre, &inside, &post);
170   return !inside.empty();
171 }
172 
173 /////////////////////////////////////////////////////////////////////
174 // SpillSet
175 /////////////////////////////////////////////////////////////////////
176 
177 void SpillSet::setAllocation(LAllocation alloc) {
178   for (size_t i = 0; i < numSpilledBundles(); i++) {
179     spilledBundle(i)->setAllocation(alloc);
180   }
181 }
182 
183 /////////////////////////////////////////////////////////////////////
184 // LiveBundle
185 /////////////////////////////////////////////////////////////////////
186 
187 #ifdef DEBUG
188 size_t LiveBundle::numRanges() const {
189   size_t count = 0;
190   for (LiveRange::BundleLinkIterator iter = rangesBegin(); iter; iter++) {
191     count++;
192   }
193   return count;
194 }
195 #endif  // DEBUG
196 
197 LiveRange* LiveBundle::rangeFor(CodePosition pos) const {
198   for (LiveRange::BundleLinkIterator iter = rangesBegin(); iter; iter++) {
199     LiveRange* range = LiveRange::get(*iter);
200     if (range->covers(pos)) {
201       return range;
202     }
203   }
204   return nullptr;
205 }
206 
207 void LiveBundle::addRange(LiveRange* range) {
208   MOZ_ASSERT(!range->bundle());
209   range->setBundle(this);
210   InsertSortedList(ranges_, &range->bundleLink);
211 }
212 
213 bool LiveBundle::addRange(TempAllocator& alloc, VirtualRegister* vreg,
214                           CodePosition from, CodePosition to) {
215   LiveRange* range = LiveRange::FallibleNew(alloc, vreg, from, to);
216   if (!range) {
217     return false;
218   }
219   addRange(range);
220   return true;
221 }
222 
223 bool LiveBundle::addRangeAndDistributeUses(TempAllocator& alloc,
224                                            LiveRange* oldRange,
225                                            CodePosition from, CodePosition to) {
226   LiveRange* range = LiveRange::FallibleNew(alloc, &oldRange->vreg(), from, to);
227   if (!range) {
228     return false;
229   }
230   addRange(range);
231   oldRange->distributeUses(range);
232   return true;
233 }
234 
235 LiveRange* LiveBundle::popFirstRange() {
236   LiveRange::BundleLinkIterator iter = rangesBegin();
237   if (!iter) {
238     return nullptr;
239   }
240 
241   LiveRange* range = LiveRange::get(*iter);
242   ranges_.removeAt(iter);
243 
244   range->setBundle(nullptr);
245   return range;
246 }
247 
248 void LiveBundle::removeRange(LiveRange* range) {
249   for (LiveRange::BundleLinkIterator iter = rangesBegin(); iter; iter++) {
250     LiveRange* existing = LiveRange::get(*iter);
251     if (existing == range) {
252       ranges_.removeAt(iter);
253       return;
254     }
255   }
256   MOZ_CRASH();
257 }
258 
259 /////////////////////////////////////////////////////////////////////
260 // VirtualRegister
261 /////////////////////////////////////////////////////////////////////
262 
263 bool VirtualRegister::addInitialRange(TempAllocator& alloc, CodePosition from,
264                                       CodePosition to, size_t* numRanges) {
265   MOZ_ASSERT(from < to);
266 
267   // Mark [from,to) as a live range for this register during the initial
268   // liveness analysis, coalescing with any existing overlapping ranges.
269 
270   // On some pathological graphs there might be a huge number of different
271   // live ranges. Allow non-overlapping live ranges to be merged if the
272   // number of ranges exceeds the cap below.
273   static const size_t CoalesceLimit = 100000;
274 
275   LiveRange* prev = nullptr;
276   LiveRange* merged = nullptr;
277   for (LiveRange::RegisterLinkIterator iter(rangesBegin()); iter;) {
278     LiveRange* existing = LiveRange::get(*iter);
279 
280     if (from > existing->to() && *numRanges < CoalesceLimit) {
281       // The new range should go after this one.
282       prev = existing;
283       iter++;
284       continue;
285     }
286 
287     if (to.next() < existing->from()) {
288       // The new range should go before this one.
289       break;
290     }
291 
292     if (!merged) {
293       // This is the first old range we've found that overlaps the new
294       // range. Extend this one to cover its union with the new range.
295       merged = existing;
296 
297       if (from < existing->from()) {
298         existing->setFrom(from);
299       }
300       if (to > existing->to()) {
301         existing->setTo(to);
302       }
303 
304       // Continue searching to see if any other old ranges can be
305       // coalesced with the new merged range.
306       iter++;
307       continue;
308     }
309 
310     // Coalesce this range into the previous range we merged into.
311     MOZ_ASSERT(existing->from() >= merged->from());
312     if (existing->to() > merged->to()) {
313       merged->setTo(existing->to());
314     }
315 
316     MOZ_ASSERT(!existing->hasDefinition());
317     existing->distributeUses(merged);
318     MOZ_ASSERT(!existing->hasUses());
319 
320     ranges_.removeAndIncrement(iter);
321   }
322 
323   if (!merged) {
324     // The new range does not overlap any existing range for the vreg.
325     LiveRange* range = LiveRange::FallibleNew(alloc, this, from, to);
326     if (!range) {
327       return false;
328     }
329 
330     if (prev) {
331       ranges_.insertAfter(&prev->registerLink, &range->registerLink);
332     } else {
333       ranges_.pushFront(&range->registerLink);
334     }
335 
336     (*numRanges)++;
337   }
338 
339   return true;
340 }
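// Worked example (added comment): with existing initial ranges [4, 8) and
// [10, 14), a call with [from, to) == [6, 11) first extends [4, 8) to [4, 11)
// (this becomes the |merged| range), then folds [10, 14) into it as well,
// leaving a single range [4, 14); the uses of the removed range are handed to
// |merged| via distributeUses.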
341 
342 void VirtualRegister::addInitialUse(UsePosition* use) {
343   LiveRange::get(*rangesBegin())->addUse(use);
344 }
345 
346 void VirtualRegister::setInitialDefinition(CodePosition from) {
347   LiveRange* first = LiveRange::get(*rangesBegin());
348   MOZ_ASSERT(from >= first->from());
349   first->setFrom(from);
350   first->setHasDefinition();
351 }
352 
353 LiveRange* VirtualRegister::rangeFor(CodePosition pos,
354                                      bool preferRegister /* = false */) const {
355   LiveRange* found = nullptr;
356   for (LiveRange::RegisterLinkIterator iter = rangesBegin(); iter; iter++) {
357     LiveRange* range = LiveRange::get(*iter);
358     if (range->covers(pos)) {
359       if (!preferRegister || range->bundle()->allocation().isRegister()) {
360         return range;
361       }
362       if (!found) {
363         found = range;
364       }
365     }
366   }
367   return found;
368 }
369 
370 void VirtualRegister::addRange(LiveRange* range) {
371   InsertSortedList(ranges_, &range->registerLink);
372 }
373 
374 void VirtualRegister::removeRange(LiveRange* range) {
375   for (LiveRange::RegisterLinkIterator iter = rangesBegin(); iter; iter++) {
376     LiveRange* existing = LiveRange::get(*iter);
377     if (existing == range) {
378       ranges_.removeAt(iter);
379       return;
380     }
381   }
382   MOZ_CRASH();
383 }
384 
385 /////////////////////////////////////////////////////////////////////
386 // BacktrackingAllocator
387 /////////////////////////////////////////////////////////////////////
388 
389 // This function pre-allocates and initializes as much global state as possible
390 // to avoid littering the algorithms with memory management cruft.
391 bool BacktrackingAllocator::init() {
392   if (!RegisterAllocator::init()) {
393     return false;
394   }
395 
396   liveIn = mir->allocate<BitSet>(graph.numBlockIds());
397   if (!liveIn) {
398     return false;
399   }
400 
401   size_t numVregs = graph.numVirtualRegisters();
402   if (!vregs.init(mir->alloc(), numVregs)) {
403     return false;
404   }
405   for (uint32_t i = 0; i < numVregs; i++) {
406     new (&vregs[i]) VirtualRegister();
407   }
408 
409   // Build virtual register objects.
410   for (size_t i = 0; i < graph.numBlocks(); i++) {
411     if (mir->shouldCancel("Create data structures (main loop)")) {
412       return false;
413     }
414 
415     LBlock* block = graph.getBlock(i);
416     for (LInstructionIterator ins = block->begin(); ins != block->end();
417          ins++) {
418       if (mir->shouldCancel("Create data structures (inner loop 1)")) {
419         return false;
420       }
421 
422       for (size_t j = 0; j < ins->numDefs(); j++) {
423         LDefinition* def = ins->getDef(j);
424         if (def->isBogusTemp()) {
425           continue;
426         }
427         vreg(def).init(*ins, def, /* isTemp = */ false);
428       }
429 
430       for (size_t j = 0; j < ins->numTemps(); j++) {
431         LDefinition* def = ins->getTemp(j);
432         if (def->isBogusTemp()) {
433           continue;
434         }
435         vreg(def).init(*ins, def, /* isTemp = */ true);
436       }
437     }
438     for (size_t j = 0; j < block->numPhis(); j++) {
439       LPhi* phi = block->getPhi(j);
440       LDefinition* def = phi->getDef(0);
441       vreg(def).init(phi, def, /* isTemp = */ false);
442     }
443   }
444 
445   LiveRegisterSet remainingRegisters(allRegisters_.asLiveSet());
446   while (!remainingRegisters.emptyGeneral()) {
447     AnyRegister reg = AnyRegister(remainingRegisters.takeAnyGeneral());
448     registers[reg.code()].allocatable = true;
449   }
450   while (!remainingRegisters.emptyFloat()) {
451     AnyRegister reg =
452         AnyRegister(remainingRegisters.takeAnyFloat<RegTypeName::Any>());
453     registers[reg.code()].allocatable = true;
454   }
455 
456   LifoAlloc* lifoAlloc = mir->alloc().lifoAlloc();
457   for (size_t i = 0; i < AnyRegister::Total; i++) {
458     registers[i].reg = AnyRegister::FromCode(i);
459     registers[i].allocations.setAllocator(lifoAlloc);
460   }
461 
462   hotcode.setAllocator(lifoAlloc);
463   callRanges.setAllocator(lifoAlloc);
464 
465   // Partition the graph into hot and cold sections, for helping to make
466   // splitting decisions. Since we don't have any profiling data this is a
467   // crapshoot, so just mark the bodies of inner loops as hot and everything
468   // else as cold.
469 
470   LBlock* backedge = nullptr;
471   for (size_t i = 0; i < graph.numBlocks(); i++) {
472     LBlock* block = graph.getBlock(i);
473 
474     // If we see a loop header, mark the backedge so we know when we have
475     // hit the end of the loop. Don't process the loop immediately, so that
476     // if there is an inner loop we will ignore the outer backedge.
477     if (block->mir()->isLoopHeader()) {
478       backedge = block->mir()->backedge()->lir();
479     }
480 
481     if (block == backedge) {
482       LBlock* header = block->mir()->loopHeaderOfBackedge()->lir();
483       LiveRange* range = LiveRange::FallibleNew(
484           alloc(), nullptr, entryOf(header), exitOf(block).next());
485       if (!range || !hotcode.insert(range)) {
486         return false;
487       }
488     }
489   }
490 
491   return true;
492 }
493 
494 bool BacktrackingAllocator::addInitialFixedRange(AnyRegister reg,
495                                                  CodePosition from,
496                                                  CodePosition to) {
497   LiveRange* range = LiveRange::FallibleNew(alloc(), nullptr, from, to);
498   return range && registers[reg.code()].allocations.insert(range);
499 }
500 
501 #ifdef DEBUG
502 // Returns true iff ins has a def/temp reusing the input allocation.
503 static bool IsInputReused(LInstruction* ins, LUse* use) {
504   for (size_t i = 0; i < ins->numDefs(); i++) {
505     if (ins->getDef(i)->policy() == LDefinition::MUST_REUSE_INPUT &&
506         ins->getOperand(ins->getDef(i)->getReusedInput())->toUse() == use) {
507       return true;
508     }
509   }
510 
511   for (size_t i = 0; i < ins->numTemps(); i++) {
512     if (ins->getTemp(i)->policy() == LDefinition::MUST_REUSE_INPUT &&
513         ins->getOperand(ins->getTemp(i)->getReusedInput())->toUse() == use) {
514       return true;
515     }
516   }
517 
518   return false;
519 }
520 #endif
521 
522 /*
523  * This function builds up liveness ranges for all virtual registers
524  * defined in the function.
525  *
526  * The algorithm is based on the one published in:
527  *
528  * Wimmer, Christian, and Michael Franz. "Linear Scan Register Allocation on
529  *     SSA Form." Proceedings of the International Symposium on Code Generation
530  *     and Optimization. Toronto, Ontario, Canada, ACM. 2010. 170-79. PDF.
531  *
532  * The algorithm operates on blocks ordered such that dominators of a block
533  * are before the block itself, and such that all blocks of a loop are
534  * contiguous. It proceeds backwards over the instructions in this order,
535  * marking registers live at their uses, ending their live ranges at
536  * definitions, and recording which registers are live at the top of every
537  * block. To deal with loop backedges, registers live at the beginning of
538  * a loop gain a range covering the entire loop.
539  */
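// Added illustration (not part of the original comment): a use in block B3
// adds a range from the top of B3 up to the use and marks the register in
// B3's liveIn set; every earlier block in which the register is still live
// gains a range covering that entire block; and when the backwards scan
// reaches the definition, setInitialDefinition shortens the front of the
// range to the definition's position. Registers live at a loop header are
// additionally given ranges covering every block of the loop (see the
// isLoopHeader fixup below).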
540 bool BacktrackingAllocator::buildLivenessInfo() {
541   JitSpew(JitSpew_RegAlloc, "Beginning liveness analysis");
542 
543   Vector<MBasicBlock*, 1, SystemAllocPolicy> loopWorkList;
544   BitSet loopDone(graph.numBlockIds());
545   if (!loopDone.init(alloc())) {
546     return false;
547   }
548 
549   size_t numRanges = 0;
550 
551   for (size_t i = graph.numBlocks(); i > 0; i--) {
552     if (mir->shouldCancel("Build Liveness Info (main loop)")) {
553       return false;
554     }
555 
556     LBlock* block = graph.getBlock(i - 1);
557     MBasicBlock* mblock = block->mir();
558 
559     BitSet& live = liveIn[mblock->id()];
560     new (&live) BitSet(graph.numVirtualRegisters());
561     if (!live.init(alloc())) {
562       return false;
563     }
564 
565     // Propagate liveIn from our successors to us.
566     for (size_t i = 0; i < mblock->lastIns()->numSuccessors(); i++) {
567       MBasicBlock* successor = mblock->lastIns()->getSuccessor(i);
568       // Skip backedges, as we fix them up at the loop header.
569       if (mblock->id() < successor->id()) {
570         live.insertAll(liveIn[successor->id()]);
571       }
572     }
573 
574     // Add successor phis.
575     if (mblock->successorWithPhis()) {
576       LBlock* phiSuccessor = mblock->successorWithPhis()->lir();
577       for (unsigned int j = 0; j < phiSuccessor->numPhis(); j++) {
578         LPhi* phi = phiSuccessor->getPhi(j);
579         LAllocation* use = phi->getOperand(mblock->positionInPhiSuccessor());
580         uint32_t reg = use->toUse()->virtualRegister();
581         live.insert(reg);
582         vreg(use).setUsedByPhi();
583       }
584     }
585 
586     // Registers are assumed alive for the entire block; a definition
587     // shortens the range to the point of definition.
588     for (BitSet::Iterator liveRegId(live); liveRegId; ++liveRegId) {
589       if (!vregs[*liveRegId].addInitialRange(alloc(), entryOf(block),
590                                              exitOf(block).next(), &numRanges))
591         return false;
592     }
593 
594     // Shorten the front end of ranges for live variables to their point of
595     // definition, if found.
596     for (LInstructionReverseIterator ins = block->rbegin();
597          ins != block->rend(); ins++) {
598       // Calls may clobber registers, so force a spill and reload around the
599       // callsite.
600       if (ins->isCall()) {
601         for (AnyRegisterIterator iter(allRegisters_.asLiveSet()); iter.more();
602              ++iter) {
603           bool found = false;
604           for (size_t i = 0; i < ins->numDefs(); i++) {
605             if (ins->getDef(i)->isFixed() &&
606                 ins->getDef(i)->output()->aliases(LAllocation(*iter))) {
607               found = true;
608               break;
609             }
610           }
611           // If this register doesn't have an explicit def above, mark
612           // it as clobbered by the call unless it is actually
613           // call-preserved.
614           if (!found && !ins->isCallPreserved(*iter)) {
615             if (!addInitialFixedRange(*iter, outputOf(*ins),
616                                       outputOf(*ins).next())) {
617               return false;
618             }
619           }
620         }
621 
622         CallRange* callRange = new (alloc().fallible())
623             CallRange(outputOf(*ins), outputOf(*ins).next());
624         if (!callRange) {
625           return false;
626         }
627 
628         callRangesList.pushFront(callRange);
629         if (!callRanges.insert(callRange)) {
630           return false;
631         }
632       }
633 
634       for (size_t i = 0; i < ins->numDefs(); i++) {
635         LDefinition* def = ins->getDef(i);
636         if (def->isBogusTemp()) {
637           continue;
638         }
639 
640         CodePosition from = outputOf(*ins);
641 
642         if (def->policy() == LDefinition::MUST_REUSE_INPUT) {
643           // MUST_REUSE_INPUT is implemented by allocating an output
644           // register and moving the input to it. Register hints are
645           // used to avoid unnecessary moves. We give the input an
646           // LUse::ANY policy to avoid allocating a register for the
647           // input.
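          // Added example (hedged): this matches the two-address form of
          // x86/x64 arithmetic noted later in this file, where the output
          // must reuse an input register; rewriting the input's policy to
          // LUse::ANY here lets the input live in memory, since the move
          // into the output register is what satisfies the instruction's
          // register requirement.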
648           LUse* inputUse = ins->getOperand(def->getReusedInput())->toUse();
649           MOZ_ASSERT(inputUse->policy() == LUse::REGISTER);
650           MOZ_ASSERT(inputUse->usedAtStart());
651           *inputUse = LUse(inputUse->virtualRegister(), LUse::ANY,
652                            /* usedAtStart = */ true);
653         }
654 
655         if (!vreg(def).addInitialRange(alloc(), from, from.next(),
656                                        &numRanges)) {
657           return false;
658         }
659         vreg(def).setInitialDefinition(from);
660         live.remove(def->virtualRegister());
661       }
662 
663       for (size_t i = 0; i < ins->numTemps(); i++) {
664         LDefinition* temp = ins->getTemp(i);
665         if (temp->isBogusTemp()) {
666           continue;
667         }
668 
669         // Normally temps are considered to cover both the input
670         // and output of the associated instruction. In some cases
671         // though we want to use a fixed register as both an input
672         // and clobbered register in the instruction, so watch for
673         // this and shorten the temp to cover only the output.
674         CodePosition from = inputOf(*ins);
675         if (temp->policy() == LDefinition::FIXED) {
676           AnyRegister reg = temp->output()->toRegister();
677           for (LInstruction::InputIterator alloc(**ins); alloc.more();
678                alloc.next()) {
679             if (alloc->isUse()) {
680               LUse* use = alloc->toUse();
681               if (use->isFixedRegister()) {
682                 if (GetFixedRegister(vreg(use).def(), use) == reg) {
683                   from = outputOf(*ins);
684                 }
685               }
686             }
687           }
688         }
689 
690         // * For non-call instructions, temps cover both the input and output,
691         //   so temps never alias uses (even at-start uses) or defs.
692         // * For call instructions, temps only cover the input (the output is
693         //   used for the force-spill ranges added above). This means temps
694         //   still don't alias uses but they can alias the (fixed) defs. For now
695         //   we conservatively require temps to have a fixed register for call
696         //   instructions to prevent a footgun.
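        // Added example: for a non-call instruction at position p the temp's
        // range below ends at outputOf(p).next(), so it conflicts with p's
        // uses and defs, while for a call it ends at outputOf(p), stopping
        // short of the outputs already covered by the force-spill ranges
        // added above.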
697         MOZ_ASSERT_IF(ins->isCall(), temp->policy() == LDefinition::FIXED);
698         CodePosition to =
699             ins->isCall() ? outputOf(*ins) : outputOf(*ins).next();
700 
701         if (!vreg(temp).addInitialRange(alloc(), from, to, &numRanges)) {
702           return false;
703         }
704         vreg(temp).setInitialDefinition(from);
705       }
706 
707       DebugOnly<bool> hasUseRegister = false;
708       DebugOnly<bool> hasUseRegisterAtStart = false;
709 
710       for (LInstruction::InputIterator inputAlloc(**ins); inputAlloc.more();
711            inputAlloc.next()) {
712         if (inputAlloc->isUse()) {
713           LUse* use = inputAlloc->toUse();
714 
715           // Call uses should always be at-start, since calls use all
716           // registers.
717           MOZ_ASSERT_IF(ins->isCall() && !inputAlloc.isSnapshotInput(),
718                         use->usedAtStart());
719 
720 #ifdef DEBUG
721           // If there are both useRegisterAtStart(x) and useRegister(y)
722           // uses, we may assign the same register to both operands
723           // (bug 772830). Don't allow this for now.
724           if (use->policy() == LUse::REGISTER) {
725             if (use->usedAtStart()) {
726               if (!IsInputReused(*ins, use)) {
727                 hasUseRegisterAtStart = true;
728               }
729             } else {
730               hasUseRegister = true;
731             }
732           }
733           MOZ_ASSERT(!(hasUseRegister && hasUseRegisterAtStart));
734 #endif
735 
736           // Don't treat RECOVERED_INPUT uses as keeping the vreg alive.
737           if (use->policy() == LUse::RECOVERED_INPUT) {
738             continue;
739           }
740 
741           CodePosition to = use->usedAtStart() ? inputOf(*ins) : outputOf(*ins);
742           if (use->isFixedRegister()) {
743             LAllocation reg(AnyRegister::FromCode(use->registerCode()));
744             for (size_t i = 0; i < ins->numDefs(); i++) {
745               LDefinition* def = ins->getDef(i);
746               if (def->policy() == LDefinition::FIXED &&
747                   *def->output() == reg) {
748                 to = inputOf(*ins);
749               }
750             }
751           }
752 
753           if (!vreg(use).addInitialRange(alloc(), entryOf(block), to.next(),
754                                          &numRanges)) {
755             return false;
756           }
757           UsePosition* usePosition =
758               new (alloc().fallible()) UsePosition(use, to);
759           if (!usePosition) {
760             return false;
761           }
762           vreg(use).addInitialUse(usePosition);
763           live.insert(use->virtualRegister());
764         }
765       }
766     }
767 
768     // Phis have simultaneous assignment semantics at block begin, so at
769     // the beginning of the block we can be sure that liveIn does not
770     // contain any phi outputs.
771     for (unsigned int i = 0; i < block->numPhis(); i++) {
772       LDefinition* def = block->getPhi(i)->getDef(0);
773       if (live.contains(def->virtualRegister())) {
774         live.remove(def->virtualRegister());
775       } else {
776         // This is a dead phi, so add a dummy range over all phis. This
777         // can go away if we have an earlier dead code elimination pass.
778         CodePosition entryPos = entryOf(block);
779         if (!vreg(def).addInitialRange(alloc(), entryPos, entryPos.next(),
780                                        &numRanges)) {
781           return false;
782         }
783       }
784     }
785 
786     if (mblock->isLoopHeader()) {
787       // A divergence from the published algorithm is required here, as
788       // our block order does not guarantee that blocks of a loop are
789       // contiguous. As a result, a single live range spanning the
790       // loop is not possible. Additionally, we require liveIn in a later
791       // pass for resolution, so that must also be fixed up here.
792       MBasicBlock* loopBlock = mblock->backedge();
793       while (true) {
794         // Blocks must already have been visited to have a liveIn set.
795         MOZ_ASSERT(loopBlock->id() >= mblock->id());
796 
797         // Add a range for this entire loop block
798         CodePosition from = entryOf(loopBlock->lir());
799         CodePosition to = exitOf(loopBlock->lir()).next();
800 
801         for (BitSet::Iterator liveRegId(live); liveRegId; ++liveRegId) {
802           if (!vregs[*liveRegId].addInitialRange(alloc(), from, to,
803                                                  &numRanges)) {
804             return false;
805           }
806         }
807 
808         // Fix up the liveIn set.
809         liveIn[loopBlock->id()].insertAll(live);
810 
811         // Make sure we don't visit this node again
812         loopDone.insert(loopBlock->id());
813 
814         // If this is the loop header, any predecessors are either the
815         // backedge or out of the loop, so skip any predecessors of
816         // this block
817         if (loopBlock != mblock) {
818           for (size_t i = 0; i < loopBlock->numPredecessors(); i++) {
819             MBasicBlock* pred = loopBlock->getPredecessor(i);
820             if (loopDone.contains(pred->id())) {
821               continue;
822             }
823             if (!loopWorkList.append(pred)) {
824               return false;
825             }
826           }
827         }
828 
829         // Terminate loop if out of work.
830         if (loopWorkList.empty()) {
831           break;
832         }
833 
834         // Grab the next block off the work list, skipping any OSR block.
835         MBasicBlock* osrBlock = graph.mir().osrBlock();
836         while (!loopWorkList.empty()) {
837           loopBlock = loopWorkList.popCopy();
838           if (loopBlock != osrBlock) {
839             break;
840           }
841         }
842 
843         // If end is reached without finding a non-OSR block, then no more work
844         // items were found.
845         if (loopBlock == osrBlock) {
846           MOZ_ASSERT(loopWorkList.empty());
847           break;
848         }
849       }
850 
851       // Clear the done set for other loops
852       loopDone.clear();
853     }
854 
855     MOZ_ASSERT_IF(!mblock->numPredecessors(), live.empty());
856   }
857 
858   JitSpew(JitSpew_RegAlloc, "Completed liveness analysis");
859   return true;
860 }
861 
862 bool BacktrackingAllocator::go() {
863   JitSpewCont(JitSpew_RegAlloc, "\n");
864   JitSpew(JitSpew_RegAlloc, "Beginning register allocation");
865 
866   JitSpewCont(JitSpew_RegAlloc, "\n");
867   if (JitSpewEnabled(JitSpew_RegAlloc)) {
868     dumpInstructions("(Pre-allocation LIR)");
869   }
870 
871   if (!init()) {
872     return false;
873   }
874 
875   if (!buildLivenessInfo()) {
876     return false;
877   }
878 
879   if (JitSpewEnabled(JitSpew_RegAlloc)) {
880     dumpLiveRangesByVReg("after liveness analysis");
881   }
882 
883   if (!allocationQueue.reserve(graph.numVirtualRegisters() * 3 / 2)) {
884     return false;
885   }
886 
887   JitSpewCont(JitSpew_RegAlloc, "\n");
888   JitSpew(JitSpew_RegAlloc, "Beginning grouping and queueing registers");
889   if (!mergeAndQueueRegisters()) {
890     return false;
891   }
892   JitSpew(JitSpew_RegAlloc, "Completed grouping and queueing registers");
893 
894   if (JitSpewEnabled(JitSpew_RegAlloc)) {
895     dumpLiveRangesByBundle("after grouping/queueing regs");
896   }
897 
898   JitSpewCont(JitSpew_RegAlloc, "\n");
899   JitSpew(JitSpew_RegAlloc, "Beginning main allocation loop");
900   JitSpewCont(JitSpew_RegAlloc, "\n");
901 
902   // Allocate, spill and split bundles until finished.
903   while (!allocationQueue.empty()) {
904     if (mir->shouldCancel("Backtracking Allocation")) {
905       return false;
906     }
907 
908     QueueItem item = allocationQueue.removeHighest();
909     if (!processBundle(mir, item.bundle)) {
910       return false;
911     }
912   }
913 
914   JitSpewCont(JitSpew_RegAlloc, "\n");
915   JitSpew(JitSpew_RegAlloc,
916           "Main allocation loop complete; "
917           "beginning spill-bundle allocation loop");
918   JitSpewCont(JitSpew_RegAlloc, "\n");
919 
920   if (!tryAllocatingRegistersForSpillBundles()) {
921     return false;
922   }
923 
924   JitSpewCont(JitSpew_RegAlloc, "\n");
925   JitSpew(JitSpew_RegAlloc, "Spill-bundle allocation loop complete");
926   JitSpewCont(JitSpew_RegAlloc, "\n");
927 
928   if (!pickStackSlots()) {
929     return false;
930   }
931 
932   if (JitSpewEnabled(JitSpew_RegAlloc)) {
933     dumpAllocations();
934   }
935 
936   if (!resolveControlFlow()) {
937     return false;
938   }
939 
940   if (!reifyAllocations()) {
941     return false;
942   }
943 
944   if (!populateSafepoints()) {
945     return false;
946   }
947 
948   if (!annotateMoveGroups()) {
949     return false;
950   }
951 
952   JitSpewCont(JitSpew_RegAlloc, "\n");
953   if (JitSpewEnabled(JitSpew_RegAlloc)) {
954     dumpInstructions("(Post-allocation LIR)");
955   }
956 
957   JitSpew(JitSpew_RegAlloc, "Finished register allocation");
958 
959   return true;
960 }
961 
962 static bool IsArgumentSlotDefinition(LDefinition* def) {
963   return def->policy() == LDefinition::FIXED && def->output()->isArgument();
964 }
965 
966 static bool IsThisSlotDefinition(LDefinition* def) {
967   return IsArgumentSlotDefinition(def) &&
968          def->output()->toArgument()->index() <
969              THIS_FRAME_ARGSLOT + sizeof(Value);
970 }
971 
972 static bool HasStackPolicy(LDefinition* def) {
973   return def->policy() == LDefinition::STACK;
974 }
975 
976 bool BacktrackingAllocator::tryMergeBundles(LiveBundle* bundle0,
977                                             LiveBundle* bundle1) {
978   // See if bundle0 and bundle1 can be merged together.
979   if (bundle0 == bundle1) {
980     return true;
981   }
982 
983   // Get a representative virtual register from each bundle.
984   VirtualRegister& reg0 = bundle0->firstRange()->vreg();
985   VirtualRegister& reg1 = bundle1->firstRange()->vreg();
986 
987   MOZ_ASSERT(CanMergeTypesInBundle(reg0.type(), reg1.type()));
988   MOZ_ASSERT(reg0.isCompatible(reg1));
989 
990   // Registers which might spill to the frame's |this| slot can only be
991   // grouped with other such registers. The frame's |this| slot must always
992   // hold the |this| value, as required by JitFrame tracing and by the Ion
993   // constructor calling convention.
994   if (IsThisSlotDefinition(reg0.def()) || IsThisSlotDefinition(reg1.def())) {
995     if (*reg0.def()->output() != *reg1.def()->output()) {
996       return true;
997     }
998   }
999 
1000   // Registers which might spill to the frame's argument slots can only be
1001   // grouped with other such registers if the frame might access those
1002   // arguments through a lazy arguments object or rest parameter.
1003   if (IsArgumentSlotDefinition(reg0.def()) ||
1004       IsArgumentSlotDefinition(reg1.def())) {
1005     if (graph.mir().entryBlock()->info().mayReadFrameArgsDirectly()) {
1006       if (*reg0.def()->output() != *reg1.def()->output()) {
1007         return true;
1008       }
1009     }
1010   }
1011 
1012   // When we make a call to a WebAssembly function that returns multiple
1013   // results, some of those results can go on the stack.  The callee is passed a
1014   // pointer to this stack area, which is represented as having policy
1015   // LDefinition::STACK (with type LDefinition::STACKRESULTS).  Individual
1016   // results alias parts of the stack area with a value-appropriate type, but
1017   // policy LDefinition::STACK.  This aliasing between allocations makes it
1018   // unsound to merge anything with an LDefinition::STACK policy.
1019   if (HasStackPolicy(reg0.def()) || HasStackPolicy(reg1.def())) {
1020     return true;
1021   }
1022 
1023   // Limit the number of times we compare ranges if there are many ranges in
1024   // one of the bundles, to avoid quadratic behavior.
1025   static const size_t MAX_RANGES = 200;
1026 
1027   // Make sure that ranges in the bundles do not overlap.
1028   LiveRange::BundleLinkIterator iter0 = bundle0->rangesBegin(),
1029                                 iter1 = bundle1->rangesBegin();
1030   size_t count = 0;
1031   while (iter0 && iter1) {
1032     if (++count >= MAX_RANGES) {
1033       return true;
1034     }
1035 
1036     LiveRange* range0 = LiveRange::get(*iter0);
1037     LiveRange* range1 = LiveRange::get(*iter1);
1038 
1039     if (range0->from() >= range1->to()) {
1040       iter1++;
1041     } else if (range1->from() >= range0->to()) {
1042       iter0++;
1043     } else {
1044       return true;
1045     }
1046   }
1047 
1048   // Move all ranges from bundle1 into bundle0.
1049   while (LiveRange* range = bundle1->popFirstRange()) {
1050     bundle0->addRange(range);
1051   }
1052 
1053   return true;
1054 }
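// Worked example (added comment): two bundles whose ranges are [1, 5) and
// [8, 12) pass the overlap walk above, so the ranges of bundle1 are moved
// into bundle0 and the bundles are effectively merged; if the ranges were
// [1, 5) and [3, 9) instead, the walk finds the overlap and returns early,
// leaving both bundles unchanged (returning true simply means "no error",
// not "merged").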
1055 
1056 static inline LDefinition* FindReusingDefOrTemp(LNode* node,
1057                                                 LAllocation* alloc) {
1058   if (node->isPhi()) {
1059     MOZ_ASSERT(node->toPhi()->numDefs() == 1);
1060     MOZ_ASSERT(node->toPhi()->getDef(0)->policy() !=
1061                LDefinition::MUST_REUSE_INPUT);
1062     return nullptr;
1063   }
1064 
1065   LInstruction* ins = node->toInstruction();
1066 
1067   for (size_t i = 0; i < ins->numDefs(); i++) {
1068     LDefinition* def = ins->getDef(i);
1069     if (def->policy() == LDefinition::MUST_REUSE_INPUT &&
1070         ins->getOperand(def->getReusedInput()) == alloc) {
1071       return def;
1072     }
1073   }
1074   for (size_t i = 0; i < ins->numTemps(); i++) {
1075     LDefinition* def = ins->getTemp(i);
1076     if (def->policy() == LDefinition::MUST_REUSE_INPUT &&
1077         ins->getOperand(def->getReusedInput()) == alloc) {
1078       return def;
1079     }
1080   }
1081   return nullptr;
1082 }
1083 
1084 static inline size_t NumReusingDefs(LInstruction* ins) {
1085   size_t num = 0;
1086   for (size_t i = 0; i < ins->numDefs(); i++) {
1087     LDefinition* def = ins->getDef(i);
1088     if (def->policy() == LDefinition::MUST_REUSE_INPUT) {
1089       num++;
1090     }
1091   }
1092   return num;
1093 }
1094 
1095 bool BacktrackingAllocator::tryMergeReusedRegister(VirtualRegister& def,
1096                                                    VirtualRegister& input) {
1097   // def is a vreg which reuses input for its output physical register. Try
1098   // to merge ranges for def with those of input if possible, as avoiding
1099   // copies before def's instruction is crucial for generated code quality
1100   // (MUST_REUSE_INPUT is used for all arithmetic on x86/x64).
1101 
1102   if (def.rangeFor(inputOf(def.ins()))) {
1103     MOZ_ASSERT(def.isTemp());
1104     def.setMustCopyInput();
1105     return true;
1106   }
1107 
1108   if (!CanMergeTypesInBundle(def.type(), input.type())) {
1109     def.setMustCopyInput();
1110     return true;
1111   }
1112 
1113   LiveRange* inputRange = input.rangeFor(outputOf(def.ins()));
1114   if (!inputRange) {
1115     // The input is not live after the instruction, either in a safepoint
1116     // for the instruction or in subsequent code. The input and output
1117     // can thus be in the same group.
1118     return tryMergeBundles(def.firstBundle(), input.firstBundle());
1119   }
1120 
1121   // Avoid merging in very large live ranges as merging has non-linear
1122   // complexity.  The cutoff value is hard to gauge.  1M was chosen because it
1123   // is "large" and yet usefully caps compile time on AutoCad-for-the-web to
1124   // something reasonable on a 2017-era desktop system.
1125   const uint32_t RANGE_SIZE_CUTOFF = 1000000;
1126   if (inputRange->to() - inputRange->from() > RANGE_SIZE_CUTOFF) {
1127     def.setMustCopyInput();
1128     return true;
1129   }
1130 
1131   // The input is live afterwards, either in future instructions or in a
1132   // safepoint for the reusing instruction. This is impossible to satisfy
1133   // without copying the input.
1134   //
1135   // It may or may not be better to split the input into two bundles at the
1136   // point of the definition, which may permit merging. One case where it is
1137   // definitely better to split is if the input never has any register uses
1138   // after the instruction. Handle this splitting eagerly.
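  // Added illustration: if the checks below all pass, the input's final range
  // [from, to) is split into preRange == [from, outputOf(def.ins())) and
  // postRange == [inputOf(def.ins()), to); preRange stays in the input's
  // original bundle (and can be merged with the def), while postRange is
  // placed in a fresh bundle that is expected to be spilled.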
1139 
1140   LBlock* block = def.ins()->block();
1141 
1142   // The input's lifetime must end within the same block as the definition,
1143   // otherwise it could live on in phis elsewhere.
1144   if (inputRange != input.lastRange() || inputRange->to() > exitOf(block)) {
1145     def.setMustCopyInput();
1146     return true;
1147   }
1148 
1149   // If we already split the input for some other register, don't make a
1150   // third bundle.
1151   if (inputRange->bundle() != input.firstRange()->bundle()) {
1152     def.setMustCopyInput();
1153     return true;
1154   }
1155 
1156   // If the input will start out in memory then adding a separate bundle for
1157   // memory uses after the def won't help.
1158   if (input.def()->isFixed() && !input.def()->output()->isRegister()) {
1159     def.setMustCopyInput();
1160     return true;
1161   }
1162 
1163   // The input cannot have register or reused uses after the definition.
1164   for (UsePositionIterator iter = inputRange->usesBegin(); iter; iter++) {
1165     if (iter->pos <= inputOf(def.ins())) {
1166       continue;
1167     }
1168 
1169     LUse* use = iter->use();
1170     if (FindReusingDefOrTemp(insData[iter->pos], use)) {
1171       def.setMustCopyInput();
1172       return true;
1173     }
1174     if (iter->usePolicy() != LUse::ANY &&
1175         iter->usePolicy() != LUse::KEEPALIVE) {
1176       def.setMustCopyInput();
1177       return true;
1178     }
1179   }
1180 
1181   LiveRange* preRange = LiveRange::FallibleNew(
1182       alloc(), &input, inputRange->from(), outputOf(def.ins()));
1183   if (!preRange) {
1184     return false;
1185   }
1186 
1187   // The new range starts at the reusing instruction's input position, so it
1188   // overlaps with the old range at one position. This is what we want,
1189   // because we need to copy the input before the instruction.
1190   LiveRange* postRange = LiveRange::FallibleNew(
1191       alloc(), &input, inputOf(def.ins()), inputRange->to());
1192   if (!postRange) {
1193     return false;
1194   }
1195 
1196   inputRange->distributeUses(preRange);
1197   inputRange->distributeUses(postRange);
1198   MOZ_ASSERT(!inputRange->hasUses());
1199 
1200   JitSpewIfEnabled(JitSpew_RegAlloc,
1201                    "  splitting reused input at %u to try to help grouping",
1202                    inputOf(def.ins()).bits());
1203 
1204   LiveBundle* firstBundle = inputRange->bundle();
1205   input.removeRange(inputRange);
1206   input.addRange(preRange);
1207   input.addRange(postRange);
1208 
1209   firstBundle->removeRange(inputRange);
1210   firstBundle->addRange(preRange);
1211 
1212   // The new range goes in a separate bundle, where it will be spilled during
1213   // allocation.
1214   LiveBundle* secondBundle = LiveBundle::FallibleNew(alloc(), nullptr, nullptr);
1215   if (!secondBundle) {
1216     return false;
1217   }
1218   secondBundle->addRange(postRange);
1219 
1220   return tryMergeBundles(def.firstBundle(), input.firstBundle());
1221 }
1222 
1223 void BacktrackingAllocator::allocateStackDefinition(VirtualRegister& reg) {
1224   LInstruction* ins = reg.ins()->toInstruction();
1225   if (reg.def()->type() == LDefinition::STACKRESULTS) {
1226     LStackArea alloc(ins->toInstruction());
1227     stackSlotAllocator.allocateStackArea(&alloc);
1228     reg.def()->setOutput(alloc);
1229   } else {
1230     // Because the definitions are visited in order, the area has been allocated
1231     // before we reach this result, so we know the operand is an LStackArea.
1232     const LUse* use = ins->getOperand(0)->toUse();
1233     VirtualRegister& area = vregs[use->virtualRegister()];
1234     const LStackArea* areaAlloc = area.def()->output()->toStackArea();
1235     reg.def()->setOutput(areaAlloc->resultAlloc(ins, reg.def()));
1236   }
1237 }
1238 
1239 bool BacktrackingAllocator::mergeAndQueueRegisters() {
1240   MOZ_ASSERT(!vregs[0u].hasRanges());
1241 
1242   // Create a bundle for each register containing all its ranges.
1243   for (size_t i = 1; i < graph.numVirtualRegisters(); i++) {
1244     VirtualRegister& reg = vregs[i];
1245     if (!reg.hasRanges()) {
1246       continue;
1247     }
1248 
1249     LiveBundle* bundle = LiveBundle::FallibleNew(alloc(), nullptr, nullptr);
1250     if (!bundle) {
1251       return false;
1252     }
1253     for (LiveRange::RegisterLinkIterator iter = reg.rangesBegin(); iter;
1254          iter++) {
1255       LiveRange* range = LiveRange::get(*iter);
1256       bundle->addRange(range);
1257     }
1258   }
1259 
1260   // If there is an OSR block, merge parameters in that block with the
1261   // corresponding parameters in the initial block.
1262   if (MBasicBlock* osr = graph.mir().osrBlock()) {
1263     size_t original = 1;
1264     for (LInstructionIterator iter = osr->lir()->begin();
1265          iter != osr->lir()->end(); iter++) {
1266       if (iter->isParameter()) {
1267         for (size_t i = 0; i < iter->numDefs(); i++) {
1268           DebugOnly<bool> found = false;
1269           VirtualRegister& paramVreg = vreg(iter->getDef(i));
1270           for (; original < paramVreg.vreg(); original++) {
1271             VirtualRegister& originalVreg = vregs[original];
1272             if (*originalVreg.def()->output() == *iter->getDef(i)->output()) {
1273               MOZ_ASSERT(originalVreg.ins()->isParameter());
1274               if (!tryMergeBundles(originalVreg.firstBundle(),
1275                                    paramVreg.firstBundle())) {
1276                 return false;
1277               }
1278               found = true;
1279               break;
1280             }
1281           }
1282           MOZ_ASSERT(found);
1283         }
1284       }
1285     }
1286   }
1287 
1288   // Try to merge registers with their reused inputs.
1289   for (size_t i = 1; i < graph.numVirtualRegisters(); i++) {
1290     VirtualRegister& reg = vregs[i];
1291     if (!reg.hasRanges()) {
1292       continue;
1293     }
1294 
1295     if (reg.def()->policy() == LDefinition::MUST_REUSE_INPUT) {
1296       LUse* use = reg.ins()
1297                       ->toInstruction()
1298                       ->getOperand(reg.def()->getReusedInput())
1299                       ->toUse();
1300       if (!tryMergeReusedRegister(reg, vreg(use))) {
1301         return false;
1302       }
1303     }
1304   }
1305 
1306   // Try to merge phis with their inputs.
1307   for (size_t i = 0; i < graph.numBlocks(); i++) {
1308     LBlock* block = graph.getBlock(i);
1309     for (size_t j = 0; j < block->numPhis(); j++) {
1310       LPhi* phi = block->getPhi(j);
1311       VirtualRegister& outputVreg = vreg(phi->getDef(0));
1312       for (size_t k = 0, kend = phi->numOperands(); k < kend; k++) {
1313         VirtualRegister& inputVreg = vreg(phi->getOperand(k)->toUse());
1314         if (!tryMergeBundles(inputVreg.firstBundle(),
1315                              outputVreg.firstBundle())) {
1316           return false;
1317         }
1318       }
1319     }
1320   }
1321 
1322   // Add all bundles to the allocation queue, and create spill sets for them.
1323   for (size_t i = 1; i < graph.numVirtualRegisters(); i++) {
1324     VirtualRegister& reg = vregs[i];
1325 
1326     // Eagerly allocate stack result areas and their component stack results.
1327     if (reg.def() && reg.def()->policy() == LDefinition::STACK) {
1328       allocateStackDefinition(reg);
1329     }
1330 
1331     for (LiveRange::RegisterLinkIterator iter = reg.rangesBegin(); iter;
1332          iter++) {
1333       LiveRange* range = LiveRange::get(*iter);
1334       LiveBundle* bundle = range->bundle();
1335       if (range == bundle->firstRange()) {
1336         if (!alloc().ensureBallast()) {
1337           return false;
1338         }
1339 
1340         SpillSet* spill = SpillSet::New(alloc());
1341         if (!spill) {
1342           return false;
1343         }
1344         bundle->setSpillSet(spill);
1345 
1346         size_t priority = computePriority(bundle);
1347         if (!allocationQueue.insert(QueueItem(bundle, priority))) {
1348           return false;
1349         }
1350       }
1351     }
1352   }
1353 
1354   return true;
1355 }
1356 
1357 static const size_t MAX_ATTEMPTS = 2;
1358 
1359 bool BacktrackingAllocator::tryAllocateFixed(LiveBundle* bundle,
1360                                              Requirement requirement,
1361                                              bool* success, bool* pfixed,
1362                                              LiveBundleVector& conflicting) {
1363   // Spill bundles which are required to be in a certain stack slot.
1364   if (!requirement.allocation().isRegister()) {
1365     JitSpew(JitSpew_RegAlloc, "  stack allocation requirement");
1366     bundle->setAllocation(requirement.allocation());
1367     *success = true;
1368     return true;
1369   }
1370 
1371   AnyRegister reg = requirement.allocation().toRegister();
1372   return tryAllocateRegister(registers[reg.code()], bundle, success, pfixed,
1373                              conflicting);
1374 }
1375 
1376 bool BacktrackingAllocator::tryAllocateNonFixed(LiveBundle* bundle,
1377                                                 Requirement requirement,
1378                                                 Requirement hint, bool* success,
1379                                                 bool* pfixed,
1380                                                 LiveBundleVector& conflicting) {
1381   // If we want, but do not require a bundle to be in a specific register,
1382   // only look at that register for allocating and evict or spill if it is
1383   // not available. Picking a separate register may be even worse than
1384   // spilling, as it will still necessitate moves and will tie up more
1385   // registers than if we spilled.
1386   if (hint.kind() == Requirement::FIXED) {
1387     AnyRegister reg = hint.allocation().toRegister();
1388     if (!tryAllocateRegister(registers[reg.code()], bundle, success, pfixed,
1389                              conflicting)) {
1390       return false;
1391     }
1392     if (*success) {
1393       return true;
1394     }
1395   }
1396 
1397   // Spill bundles which have no hint or register requirement.
1398   if (requirement.kind() == Requirement::NONE &&
1399       hint.kind() != Requirement::REGISTER) {
1400     JitSpew(JitSpew_RegAlloc,
1401             "  postponed spill (no hint or register requirement)");
1402     if (!spilledBundles.append(bundle)) {
1403       return false;
1404     }
1405     *success = true;
1406     return true;
1407   }
1408 
1409   if (conflicting.empty() || minimalBundle(bundle)) {
1410     if (!tryAllocateAnyRegister(bundle, success, pfixed, conflicting)) {
1411       return false;
1412     }
1413     if (*success) {
1414       return true;
1415     }
1416   }
1417 
1418   // Spill bundles which have no register requirement if they didn't get
1419   // allocated.
1420   if (requirement.kind() == Requirement::NONE) {
1421     JitSpew(JitSpew_RegAlloc, "  postponed spill (no register requirement)");
1422     if (!spilledBundles.append(bundle)) {
1423       return false;
1424     }
1425     *success = true;
1426     return true;
1427   }
1428 
1429   // We failed to allocate this bundle.
1430   MOZ_ASSERT(!*success);
1431   return true;
1432 }
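// Added summary (a reading of the code above, not original documentation):
// tryAllocateNonFixed tries, in order, the hinted fixed register (if any), a
// postponed spill when there is neither a hint nor a register requirement,
// any allocatable register (skipped if the hint attempt already recorded a
// conflict and the bundle is not minimal), and finally a postponed spill when
// there is no register requirement; if none of these succeed, *success stays
// false and the caller decides whether to evict or split.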
1433 
1434 bool BacktrackingAllocator::processBundle(MIRGenerator* mir,
1435                                           LiveBundle* bundle) {
1436   JitSpewIfEnabled(JitSpew_RegAlloc,
1437                    "Allocating %s [priority %zu] [weight %zu]",
1438                    bundle->toString().get(), computePriority(bundle),
1439                    computeSpillWeight(bundle));
1440 
1441   // A bundle can be processed by doing any of the following:
1442   //
1443   // - Assigning the bundle a register. The bundle cannot overlap any other
1444   //   bundle allocated for that physical register.
1445   //
1446   // - Spilling the bundle, provided it has no register uses.
1447   //
1448   // - Splitting the bundle into two or more bundles which cover the original
1449   //   one. The new bundles are placed back onto the priority queue for later
1450   //   processing.
1451   //
1452   // - Evicting one or more existing allocated bundles, and then doing one
1453   //   of the above operations. Evicted bundles are placed back on the
1454   //   priority queue. Any evicted bundles must have a lower spill weight
1455   //   than the bundle being processed.
1456   //
1457   // As long as this structure is followed, termination is guaranteed.
1458   // In general, we want to minimize the amount of bundle splitting (which
1459   // generally necessitates spills), so allocate longer lived, lower weight
1460   // bundles first and evict and split them later if they prevent allocation
1461   // for higher weight bundles.
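  // Added note (a reading of the loop below): a non-minimal bundle gets at
  // most MAX_ATTEMPTS rounds of eviction before it is split, and eviction is
  // only attempted when every conflicting bundle has a strictly lower spill
  // weight than this bundle; minimal bundles may keep evicting, which is safe
  // because minimal bundles are always allocatable.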
1462 
1463   Requirement requirement, hint;
1464   bool canAllocate = computeRequirement(bundle, &requirement, &hint);
1465 
1466   bool fixed;
1467   LiveBundleVector conflicting;
1468   for (size_t attempt = 0;; attempt++) {
1469     if (mir->shouldCancel("Backtracking Allocation (processBundle loop)")) {
1470       return false;
1471     }
1472 
1473     if (canAllocate) {
1474       bool success = false;
1475       fixed = false;
1476       conflicting.clear();
1477 
1478       // Ok, let's try allocating for this bundle.
1479       if (requirement.kind() == Requirement::FIXED) {
1480         if (!tryAllocateFixed(bundle, requirement, &success, &fixed,
1481                               conflicting)) {
1482           return false;
1483         }
1484       } else {
1485         if (!tryAllocateNonFixed(bundle, requirement, hint, &success, &fixed,
1486                                  conflicting)) {
1487           return false;
1488         }
1489       }
1490 
1491       // If that worked, we're done!
1492       if (success) {
1493         return true;
1494       }
1495 
1496       // If that didn't work, but we have one or more non-fixed bundles
1497       // known to be conflicting, maybe we can evict them and try again.
1498       if ((attempt < MAX_ATTEMPTS || minimalBundle(bundle)) && !fixed &&
1499           !conflicting.empty() &&
1500           maximumSpillWeight(conflicting) < computeSpillWeight(bundle)) {
1501         for (size_t i = 0; i < conflicting.length(); i++) {
1502           if (!evictBundle(conflicting[i])) {
1503             return false;
1504           }
1505         }
1506         continue;
1507       }
1508     }
1509 
1510     // A minimal bundle cannot be split any further. If we try to split it
1511     // at this point we will just end up with the same bundle and will
1512     // enter an infinite loop. Weights and the initial live ranges must
1513     // be constructed so that any minimal bundle is allocatable.
1514     MOZ_ASSERT(!minimalBundle(bundle));
1515 
1516     LiveBundle* conflict = conflicting.empty() ? nullptr : conflicting[0];
1517     return chooseBundleSplit(bundle, canAllocate && fixed, conflict);
1518   }
1519 }
1520 
1521 bool BacktrackingAllocator::computeRequirement(LiveBundle* bundle,
1522                                                Requirement* requirement,
1523                                                Requirement* hint) {
1524   // Set any requirement or hint on bundle according to its definition and
1525   // uses. Return false if there are conflicting requirements which will
1526   // require the bundle to be split.
1527 
1528   for (LiveRange::BundleLinkIterator iter = bundle->rangesBegin(); iter;
1529        iter++) {
1530     LiveRange* range = LiveRange::get(*iter);
1531     VirtualRegister& reg = range->vreg();
1532 
1533     if (range->hasDefinition()) {
1534       // Deal with any definition constraints/hints.
1535       LDefinition::Policy policy = reg.def()->policy();
1536       if (policy == LDefinition::FIXED || policy == LDefinition::STACK) {
1537         // Fixed and stack policies get a FIXED requirement.  (In the stack
1538         // case, the allocation should have been performed already by
1539         // mergeAndQueueRegisters.)
1540         JitSpewIfEnabled(JitSpew_RegAlloc,
1541                          "  Requirement %s, fixed by definition",
1542                          reg.def()->output()->toString().get());
1543         if (!requirement->merge(Requirement(*reg.def()->output()))) {
1544           return false;
1545         }
1546       } else if (reg.ins()->isPhi()) {
1547         // Phis don't have any requirements, but they should prefer their
1548         // input allocations. This is captured by the group hints above.
1549       } else {
1550         // Non-phis get a REGISTER requirement.
1551         if (!requirement->merge(Requirement(Requirement::REGISTER))) {
1552           return false;
1553         }
1554       }
1555     }
1556 
1557     // Search uses for requirements.
1558     for (UsePositionIterator iter = range->usesBegin(); iter; iter++) {
1559       LUse::Policy policy = iter->usePolicy();
1560       if (policy == LUse::FIXED) {
1561         AnyRegister required = GetFixedRegister(reg.def(), iter->use());
1562 
1563         JitSpewIfEnabled(JitSpew_RegAlloc, "  Requirement %s, due to use at %u",
1564                          required.name(), iter->pos.bits());
1565 
1566         // If there are multiple fixed registers which the bundle is
1567         // required to use, fail. The bundle will need to be split before
1568         // it can be allocated.
1569         if (!requirement->merge(Requirement(LAllocation(required)))) {
1570           return false;
1571         }
1572       } else if (policy == LUse::REGISTER) {
1573         if (!requirement->merge(Requirement(Requirement::REGISTER))) {
1574           return false;
1575         }
1576       } else if (policy == LUse::ANY) {
1577         // ANY differs from KEEPALIVE by actively preferring a register.
1578         if (!hint->merge(Requirement(Requirement::REGISTER))) {
1579           return false;
1580         }
1581       }
1582 
1583       // The only case of STACK use policies is individual stack results using
1584       // their containing stack result area, which is given a fixed allocation
1585       // above.
1586       MOZ_ASSERT_IF(policy == LUse::STACK,
1587                     requirement->kind() == Requirement::FIXED);
1588       MOZ_ASSERT_IF(policy == LUse::STACK,
1589                     requirement->allocation().isStackArea());
1590     }
1591   }
1592 
1593   return true;
1594 }
1595 
1596 bool BacktrackingAllocator::tryAllocateRegister(PhysicalRegister& r,
1597                                                 LiveBundle* bundle,
1598                                                 bool* success, bool* pfixed,
1599                                                 LiveBundleVector& conflicting) {
1600   *success = false;
1601 
1602   if (!r.allocatable) {
1603     return true;
1604   }
1605 
1606   LiveBundleVector aliasedConflicting;
1607 
1608   for (LiveRange::BundleLinkIterator iter = bundle->rangesBegin(); iter;
1609        iter++) {
1610     LiveRange* range = LiveRange::get(*iter);
1611 
1612     // All ranges in the bundle must be compatible with the physical register.
1613     MOZ_ASSERT(range->vreg().isCompatible(r.reg));
1614 
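    // Check for existing allocations of this register and of anything that
    // aliases it (on some targets, e.g. ARM, a double register aliases two
    // single-precision registers).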
1615     for (size_t a = 0; a < r.reg.numAliased(); a++) {
1616       PhysicalRegister& rAlias = registers[r.reg.aliased(a).code()];
1617       LiveRange* existing;
1618       if (!rAlias.allocations.contains(range, &existing)) {
1619         continue;
1620       }
1621       if (existing->hasVreg()) {
1622         MOZ_ASSERT(existing->bundle()->allocation().toRegister() == rAlias.reg);
1623         bool duplicate = false;
1624         for (size_t i = 0; i < aliasedConflicting.length(); i++) {
1625           if (aliasedConflicting[i] == existing->bundle()) {
1626             duplicate = true;
1627             break;
1628           }
1629         }
1630         if (!duplicate && !aliasedConflicting.append(existing->bundle())) {
1631           return false;
1632         }
1633       } else {
1634         JitSpewIfEnabled(JitSpew_RegAlloc, "  %s collides with fixed use %s",
1635                          rAlias.reg.name(), existing->toString().get());
1636         *pfixed = true;
1637         return true;
1638       }
1639     }
1640   }
1641 
1642   if (!aliasedConflicting.empty()) {
1643     // One or more aliased registers are allocated to another bundle
1644     // overlapping this one. Keep track of the conflicting set, and in the
1645     // case of multiple conflicting sets keep track of the set with the
1646     // lowest maximum spill weight.
1647 
1648     // The #ifdef guards against "unused variable 'existing'" bustage.
1649 #ifdef JS_JITSPEW
1650     if (JitSpewEnabled(JitSpew_RegAlloc)) {
1651       if (aliasedConflicting.length() == 1) {
1652         LiveBundle* existing = aliasedConflicting[0];
1653         JitSpew(JitSpew_RegAlloc, "  %s collides with %s [weight %zu]",
1654                 r.reg.name(), existing->toString().get(),
1655                 computeSpillWeight(existing));
1656       } else {
1657         JitSpew(JitSpew_RegAlloc, "  %s collides with the following",
1658                 r.reg.name());
1659         for (size_t i = 0; i < aliasedConflicting.length(); i++) {
1660           LiveBundle* existing = aliasedConflicting[i];
1661           JitSpew(JitSpew_RegAlloc, "    %s [weight %zu]",
1662                   existing->toString().get(), computeSpillWeight(existing));
1663         }
1664       }
1665     }
1666 #endif
1667 
1668     if (conflicting.empty()) {
1669       conflicting = std::move(aliasedConflicting);
1670     } else {
1671       if (maximumSpillWeight(aliasedConflicting) <
1672           maximumSpillWeight(conflicting)) {
1673         conflicting = std::move(aliasedConflicting);
1674       }
1675     }
1676     return true;
1677   }
1678 
1679   JitSpewIfEnabled(JitSpew_RegAlloc, "  allocated to %s", r.reg.name());
1680 
1681   for (LiveRange::BundleLinkIterator iter = bundle->rangesBegin(); iter;
1682        iter++) {
1683     LiveRange* range = LiveRange::get(*iter);
1684     if (!alloc().ensureBallast()) {
1685       return false;
1686     }
1687     if (!r.allocations.insert(range)) {
1688       return false;
1689     }
1690   }
1691 
1692   bundle->setAllocation(LAllocation(r.reg));
1693   *success = true;
1694   return true;
1695 }
1696 
1697 bool BacktrackingAllocator::tryAllocateAnyRegister(
1698     LiveBundle* bundle, bool* success, bool* pfixed,
1699     LiveBundleVector& conflicting) {
1700   // Search for any available register which the bundle can be allocated to.
1701 
1702   LDefinition::Type type = bundle->firstRange()->vreg().type();
1703 
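  // The physical register table is ordered with general purpose registers in
  // [0, FirstFloatReg) and floating point registers in [FirstFloatReg, Total),
  // so only the compatible portion of the table is scanned below.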
1704   if (LDefinition::isFloatReg(type)) {
1705     for (size_t i = AnyRegister::FirstFloatReg; i < AnyRegister::Total; i++) {
1706       if (!LDefinition::isFloatRegCompatible(type, registers[i].reg.fpu())) {
1707         continue;
1708       }
1709       if (!tryAllocateRegister(registers[i], bundle, success, pfixed,
1710                                conflicting)) {
1711         return false;
1712       }
1713       if (*success) {
1714         break;
1715       }
1716     }
1717     return true;
1718   }
1719 
1720   for (size_t i = 0; i < AnyRegister::FirstFloatReg; i++) {
1721     if (!tryAllocateRegister(registers[i], bundle, success, pfixed,
1722                              conflicting)) {
1723       return false;
1724     }
1725     if (*success) {
1726       break;
1727     }
1728   }
1729   return true;
1730 }
1731 
1732 bool BacktrackingAllocator::evictBundle(LiveBundle* bundle) {
1733   JitSpewIfEnabled(JitSpew_RegAlloc,
1734                    "  Evicting %s [priority %zu] [weight %zu]",
1735                    bundle->toString().get(), computePriority(bundle),
1736                    computeSpillWeight(bundle));
1737 
1738   AnyRegister reg(bundle->allocation().toRegister());
1739   PhysicalRegister& physical = registers[reg.code()];
1740   MOZ_ASSERT(physical.reg == reg && physical.allocatable);
1741 
1742   for (LiveRange::BundleLinkIterator iter = bundle->rangesBegin(); iter;
1743        iter++) {
1744     LiveRange* range = LiveRange::get(*iter);
1745     physical.allocations.remove(range);
1746   }
1747 
1748   bundle->setAllocation(LAllocation());
1749 
1750   size_t priority = computePriority(bundle);
1751   return allocationQueue.insert(QueueItem(bundle, priority));
1752 }
1753 
1754 bool BacktrackingAllocator::splitAndRequeueBundles(
1755     LiveBundle* bundle, const LiveBundleVector& newBundles) {
1756 #ifdef DEBUG
1757   if (newBundles.length() == 1) {
1758     LiveBundle* newBundle = newBundles[0];
1759     if (newBundle->numRanges() == bundle->numRanges() &&
1760         computePriority(newBundle) == computePriority(bundle)) {
1761       bool different = false;
1762       LiveRange::BundleLinkIterator oldRanges = bundle->rangesBegin();
1763       LiveRange::BundleLinkIterator newRanges = newBundle->rangesBegin();
1764       while (oldRanges) {
1765         LiveRange* oldRange = LiveRange::get(*oldRanges);
1766         LiveRange* newRange = LiveRange::get(*newRanges);
1767         if (oldRange->from() != newRange->from() ||
1768             oldRange->to() != newRange->to()) {
1769           different = true;
1770           break;
1771         }
1772         oldRanges++;
1773         newRanges++;
1774       }
1775 
1776       // This is likely to trigger an infinite loop in register allocation. This
1777       // can be the result of invalid register constraints, making regalloc
1778       // impossible; consider relaxing those.
1779       MOZ_ASSERT(different,
1780                  "Split results in the same bundle with the same priority");
1781     }
1782   }
1783 #endif
1784 
1785   if (JitSpewEnabled(JitSpew_RegAlloc)) {
1786     JitSpew(JitSpew_RegAlloc, "  .. into:");
1787     for (size_t i = 0; i < newBundles.length(); i++) {
1788       JitSpew(JitSpew_RegAlloc, "    %s", newBundles[i]->toString().get());
1789     }
1790   }
1791 
1792   // Remove all ranges in the old bundle from their register's list.
1793   for (LiveRange::BundleLinkIterator iter = bundle->rangesBegin(); iter;
1794        iter++) {
1795     LiveRange* range = LiveRange::get(*iter);
1796     range->vreg().removeRange(range);
1797   }
1798 
1799   // Add all ranges in the new bundles to their register's list.
1800   for (size_t i = 0; i < newBundles.length(); i++) {
1801     LiveBundle* newBundle = newBundles[i];
1802     for (LiveRange::BundleLinkIterator iter = newBundle->rangesBegin(); iter;
1803          iter++) {
1804       LiveRange* range = LiveRange::get(*iter);
1805       range->vreg().addRange(range);
1806     }
1807   }
1808 
1809   // Queue the new bundles for register assignment.
1810   for (size_t i = 0; i < newBundles.length(); i++) {
1811     LiveBundle* newBundle = newBundles[i];
1812     size_t priority = computePriority(newBundle);
1813     if (!allocationQueue.insert(QueueItem(newBundle, priority))) {
1814       return false;
1815     }
1816   }
1817 
1818   return true;
1819 }
1820 
1821 bool BacktrackingAllocator::spill(LiveBundle* bundle) {
1822   JitSpew(JitSpew_RegAlloc, "  Spilling bundle");
1823   MOZ_ASSERT(bundle->allocation().isBogus());
1824 
1825   if (LiveBundle* spillParent = bundle->spillParent()) {
1826     JitSpew(JitSpew_RegAlloc, "    Using existing spill bundle");
1827     for (LiveRange::BundleLinkIterator iter = bundle->rangesBegin(); iter;
1828          iter++) {
1829       LiveRange* range = LiveRange::get(*iter);
1830       LiveRange* parentRange = spillParent->rangeFor(range->from());
1831       MOZ_ASSERT(parentRange->contains(range));
1832       MOZ_ASSERT(&range->vreg() == &parentRange->vreg());
1833       range->distributeUses(parentRange);
1834       MOZ_ASSERT(!range->hasUses());
1835       range->vreg().removeRange(range);
1836     }
1837     return true;
1838   }
1839 
1840   return bundle->spillSet()->addSpilledBundle(bundle);
1841 }
1842 
1843 bool BacktrackingAllocator::tryAllocatingRegistersForSpillBundles() {
1844   for (auto it = spilledBundles.begin(); it != spilledBundles.end(); it++) {
1845     LiveBundle* bundle = *it;
1846     LiveBundleVector conflicting;
1847     bool fixed = false;
1848     bool success = false;
1849 
1850     if (mir->shouldCancel("Backtracking Try Allocating Spilled Bundles")) {
1851       return false;
1852     }
1853 
1854     JitSpewIfEnabled(JitSpew_RegAlloc, "Spill or allocate %s",
1855                      bundle->toString().get());
1856 
1857     if (!tryAllocateAnyRegister(bundle, &success, &fixed, conflicting)) {
1858       return false;
1859     }
1860 
1861     // If the bundle still has no register, spill the bundle.
1862     if (!success && !spill(bundle)) {
1863       return false;
1864     }
1865   }
1866 
1867   return true;
1868 }
1869 
1870 bool BacktrackingAllocator::pickStackSlots() {
1871   for (size_t i = 1; i < graph.numVirtualRegisters(); i++) {
1872     VirtualRegister& reg = vregs[i];
1873 
1874     if (mir->shouldCancel("Backtracking Pick Stack Slots")) {
1875       return false;
1876     }
1877 
1878     for (LiveRange::RegisterLinkIterator iter = reg.rangesBegin(); iter;
1879          iter++) {
1880       LiveRange* range = LiveRange::get(*iter);
1881       LiveBundle* bundle = range->bundle();
1882 
1883       if (bundle->allocation().isBogus()) {
1884         if (!pickStackSlot(bundle->spillSet())) {
1885           return false;
1886         }
1887         MOZ_ASSERT(!bundle->allocation().isBogus());
1888       }
1889     }
1890   }
1891 
1892   return true;
1893 }
1894 
1895 bool BacktrackingAllocator::pickStackSlot(SpillSet* spillSet) {
1896   // Look through all ranges that have been spilled in this set for a
1897   // register definition which is fixed to a stack or argument slot. If we
1898   // find one, use it for all bundles that have been spilled. tryMergeBundles
1899   // makes sure this reuse is possible when an initial bundle contains ranges
1900   // from multiple virtual registers.
1901   for (size_t i = 0; i < spillSet->numSpilledBundles(); i++) {
1902     LiveBundle* bundle = spillSet->spilledBundle(i);
1903     for (LiveRange::BundleLinkIterator iter = bundle->rangesBegin(); iter;
1904          iter++) {
1905       LiveRange* range = LiveRange::get(*iter);
1906       if (range->hasDefinition()) {
1907         LDefinition* def = range->vreg().def();
1908         if (def->policy() == LDefinition::FIXED) {
1909           MOZ_ASSERT(!def->output()->isRegister());
1910           MOZ_ASSERT(!def->output()->isStackSlot());
1911           spillSet->setAllocation(*def->output());
1912           return true;
1913         }
1914       }
1915     }
1916   }
1917 
1918   LDefinition::Type type =
1919       spillSet->spilledBundle(0)->firstRange()->vreg().type();
1920 
1921   SpillSlotList* slotList;
1922   switch (StackSlotAllocator::width(type)) {
1923     case 4:
1924       slotList = &normalSlots;
1925       break;
1926     case 8:
1927       slotList = &doubleSlots;
1928       break;
1929     case 16:
1930       slotList = &quadSlots;
1931       break;
1932     default:
1933       MOZ_CRASH("Bad width");
1934   }
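  // Slots of width 4, 8 and 16 bytes are kept on separate free lists
  // (normalSlots, doubleSlots and quadSlots), so the reuse search below never
  // mixes widths.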
1935 
1936   // Maximum number of existing spill slots we will look at before giving up
1937   // and allocating a new slot.
1938   static const size_t MAX_SEARCH_COUNT = 10;
1939 
1940   size_t searches = 0;
1941   SpillSlot* stop = nullptr;
1942   while (!slotList->empty()) {
1943     SpillSlot* spillSlot = *slotList->begin();
1944     if (!stop) {
1945       stop = spillSlot;
1946     } else if (stop == spillSlot) {
1947       // We looked through every slot in the list.
1948       break;
1949     }
1950 
1951     bool success = true;
1952     for (size_t i = 0; i < spillSet->numSpilledBundles(); i++) {
1953       LiveBundle* bundle = spillSet->spilledBundle(i);
1954       for (LiveRange::BundleLinkIterator iter = bundle->rangesBegin(); iter;
1955            iter++) {
1956         LiveRange* range = LiveRange::get(*iter);
1957         LiveRange* existing;
1958         if (spillSlot->allocated.contains(range, &existing)) {
1959           success = false;
1960           break;
1961         }
1962       }
1963       if (!success) {
1964         break;
1965       }
1966     }
1967     if (success) {
1968       // We can reuse this physical stack slot for the new bundles.
1969       // Update the allocated ranges for the slot.
1970       for (size_t i = 0; i < spillSet->numSpilledBundles(); i++) {
1971         LiveBundle* bundle = spillSet->spilledBundle(i);
1972         if (!insertAllRanges(spillSlot->allocated, bundle)) {
1973           return false;
1974         }
1975       }
1976       spillSet->setAllocation(spillSlot->alloc);
1977       return true;
1978     }
1979 
1980     // On a miss, move the slot to the end of the list. This makes us less
1981     // likely to repeatedly attempt to allocate from slots whose ranges are
1982     // large and highly contended.
1983     slotList->popFront();
1984     slotList->pushBack(spillSlot);
1985 
1986     if (++searches == MAX_SEARCH_COUNT) {
1987       break;
1988     }
1989   }
1990 
1991   // We need a new physical stack slot.
1992   uint32_t stackSlot = stackSlotAllocator.allocateSlot(type);
1993 
1994   SpillSlot* spillSlot =
1995       new (alloc().fallible()) SpillSlot(stackSlot, alloc().lifoAlloc());
1996   if (!spillSlot) {
1997     return false;
1998   }
1999 
2000   for (size_t i = 0; i < spillSet->numSpilledBundles(); i++) {
2001     LiveBundle* bundle = spillSet->spilledBundle(i);
2002     if (!insertAllRanges(spillSlot->allocated, bundle)) {
2003       return false;
2004     }
2005   }
2006 
2007   spillSet->setAllocation(spillSlot->alloc);
2008 
2009   slotList->pushFront(spillSlot);
2010   return true;
2011 }
2012 
2013 bool BacktrackingAllocator::insertAllRanges(LiveRangeSet& set,
2014                                             LiveBundle* bundle) {
2015   for (LiveRange::BundleLinkIterator iter = bundle->rangesBegin(); iter;
2016        iter++) {
2017     LiveRange* range = LiveRange::get(*iter);
2018     if (!alloc().ensureBallast()) {
2019       return false;
2020     }
2021     if (!set.insert(range)) {
2022       return false;
2023     }
2024   }
2025   return true;
2026 }
2027 
2028 bool BacktrackingAllocator::deadRange(LiveRange* range) {
2029   // Check for direct uses of this range.
2030   if (range->hasUses() || range->hasDefinition()) {
2031     return false;
2032   }
2033 
2034   CodePosition start = range->from();
2035   LNode* ins = insData[start];
2036   if (start == entryOf(ins->block())) {
2037     return false;
2038   }
2039 
2040   VirtualRegister& reg = range->vreg();
2041 
2042   // Check if there are later ranges for this vreg.
2043   LiveRange::RegisterLinkIterator iter = reg.rangesBegin(range);
2044   for (iter++; iter; iter++) {
2045     LiveRange* laterRange = LiveRange::get(*iter);
2046     if (laterRange->from() > range->from()) {
2047       return false;
2048     }
2049   }
2050 
2051   // Check if this range ends at a loop backedge.
2052   LNode* last = insData[range->to().previous()];
2053   if (last->isGoto() &&
2054       last->toGoto()->target()->id() < last->block()->mir()->id()) {
2055     return false;
2056   }
2057 
2058   // Check if there are phis which this vreg flows to.
2059   if (reg.usedByPhi()) {
2060     return false;
2061   }
2062 
2063   return true;
2064 }
2065 
2066 bool BacktrackingAllocator::moveAtEdge(LBlock* predecessor, LBlock* successor,
2067                                        LiveRange* from, LiveRange* to,
2068                                        LDefinition::Type type) {
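  // When the successor has multiple predecessors, the move must be placed at
  // the end of the predecessor (the assert below checks it has a single
  // successor); otherwise it can go at the successor's entry.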
2069   if (successor->mir()->numPredecessors() > 1) {
2070     MOZ_ASSERT(predecessor->mir()->numSuccessors() == 1);
2071     return moveAtExit(predecessor, from, to, type);
2072   }
2073 
2074   return moveAtEntry(successor, from, to, type);
2075 }
2076 
2077 bool BacktrackingAllocator::resolveControlFlow() {
2078   // Add moves to handle changing assignments for vregs over their lifetime.
2079   JitSpew(JitSpew_RegAlloc, "ResolveControlFlow: begin");
2080 
2081   JitSpew(JitSpew_RegAlloc,
2082           "  ResolveControlFlow: adding MoveGroups within blocks");
2083 
2084   // Look for places where a register's assignment changes in the middle of a
2085   // basic block.
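  // For example, if a vreg was split and its later range was spilled, a move
  // is needed within the block from the register holding the earlier range to
  // the stack slot holding the later one.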
2086   MOZ_ASSERT(!vregs[0u].hasRanges());
2087   for (size_t i = 1; i < graph.numVirtualRegisters(); i++) {
2088     VirtualRegister& reg = vregs[i];
2089 
2090     if (mir->shouldCancel(
2091             "Backtracking Resolve Control Flow (vreg outer loop)")) {
2092       return false;
2093     }
2094 
2095     for (LiveRange::RegisterLinkIterator iter = reg.rangesBegin(); iter;) {
2096       LiveRange* range = LiveRange::get(*iter);
2097 
2098       if (mir->shouldCancel(
2099               "Backtracking Resolve Control Flow (vreg inner loop)")) {
2100         return false;
2101       }
2102 
2103       // Remove ranges which will never be used.
2104       if (deadRange(range)) {
2105         reg.removeRangeAndIncrement(iter);
2106         continue;
2107       }
2108 
2109       // The range which defines the register does not have a predecessor
2110       // to add moves from.
2111       if (range->hasDefinition()) {
2112         iter++;
2113         continue;
2114       }
2115 
2116       // Ignore ranges that start at block boundaries. We will handle
2117       // these in the next phase.
2118       CodePosition start = range->from();
2119       LNode* ins = insData[start];
2120       if (start == entryOf(ins->block())) {
2121         iter++;
2122         continue;
2123       }
2124 
2125       // If we already saw a range which covers the start of this range
2126       // and has the same allocation, we don't need an explicit move at
2127       // the start of this range.
2128       bool skip = false;
2129       for (LiveRange::RegisterLinkIterator prevIter = reg.rangesBegin();
2130            prevIter != iter; prevIter++) {
2131         LiveRange* prevRange = LiveRange::get(*prevIter);
2132         if (prevRange->covers(start) && prevRange->bundle()->allocation() ==
2133                                             range->bundle()->allocation()) {
2134           skip = true;
2135           break;
2136         }
2137       }
2138       if (skip) {
2139         iter++;
2140         continue;
2141       }
2142 
2143       if (!alloc().ensureBallast()) {
2144         return false;
2145       }
2146 
2147       LiveRange* predecessorRange =
2148           reg.rangeFor(start.previous(), /* preferRegister = */ true);
2149       if (start.subpos() == CodePosition::INPUT) {
2150         JitSpew(JitSpew_RegAlloc, "    moveInput (%s) <- (%s)",
2151                 range->toString().get(), predecessorRange->toString().get());
2152         if (!moveInput(ins->toInstruction(), predecessorRange, range,
2153                        reg.type())) {
2154           return false;
2155         }
2156       } else {
2157         JitSpew(JitSpew_RegAlloc, "    (moveAfter)");
2158         if (!moveAfter(ins->toInstruction(), predecessorRange, range,
2159                        reg.type())) {
2160           return false;
2161         }
2162       }
2163 
2164       iter++;
2165     }
2166   }
2167 
2168   JitSpew(JitSpew_RegAlloc,
2169           "  ResolveControlFlow: adding MoveGroups for phi nodes");
2170 
2171   for (size_t i = 0; i < graph.numBlocks(); i++) {
2172     if (mir->shouldCancel("Backtracking Resolve Control Flow (block loop)")) {
2173       return false;
2174     }
2175 
2176     LBlock* successor = graph.getBlock(i);
2177     MBasicBlock* mSuccessor = successor->mir();
2178     if (mSuccessor->numPredecessors() < 1) {
2179       continue;
2180     }
2181 
2182     // Resolve phis to moves.
2183     for (size_t j = 0; j < successor->numPhis(); j++) {
2184       LPhi* phi = successor->getPhi(j);
2185       MOZ_ASSERT(phi->numDefs() == 1);
2186       LDefinition* def = phi->getDef(0);
2187       VirtualRegister& reg = vreg(def);
2188       LiveRange* to = reg.rangeFor(entryOf(successor));
2189       MOZ_ASSERT(to);
2190 
2191       for (size_t k = 0; k < mSuccessor->numPredecessors(); k++) {
2192         LBlock* predecessor = mSuccessor->getPredecessor(k)->lir();
2193         MOZ_ASSERT(predecessor->mir()->numSuccessors() == 1);
2194 
2195         LAllocation* input = phi->getOperand(k);
2196         LiveRange* from = vreg(input).rangeFor(exitOf(predecessor),
2197                                                /* preferRegister = */ true);
2198         MOZ_ASSERT(from);
2199 
2200         if (!alloc().ensureBallast()) {
2201           return false;
2202         }
2203 
2204         // Note: we have to use moveAtEdge both here and below (for edge
2205         // resolution) to avoid conflicting moves. See bug 1493900.
2206         JitSpew(JitSpew_RegAlloc, "    (moveAtEdge#1)");
2207         if (!moveAtEdge(predecessor, successor, from, to, def->type())) {
2208           return false;
2209         }
2210       }
2211     }
2212   }
2213 
2214   JitSpew(JitSpew_RegAlloc,
2215           "  ResolveControlFlow: adding MoveGroups to fix conflicted edges");
2216 
2217   // Add moves to resolve graph edges with different allocations at their
2218   // source and target.
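  // For example, a vreg that enters a block in a register but sat in its
  // spill slot at the end of some predecessor needs a reload move along that
  // edge.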
2219   for (size_t i = 1; i < graph.numVirtualRegisters(); i++) {
2220     VirtualRegister& reg = vregs[i];
2221     for (LiveRange::RegisterLinkIterator iter = reg.rangesBegin(); iter;
2222          iter++) {
2223       LiveRange* targetRange = LiveRange::get(*iter);
2224 
2225       size_t firstBlockId = insData[targetRange->from()]->block()->mir()->id();
2226       if (!targetRange->covers(entryOf(graph.getBlock(firstBlockId)))) {
2227         firstBlockId++;
2228       }
2229       for (size_t id = firstBlockId; id < graph.numBlocks(); id++) {
2230         LBlock* successor = graph.getBlock(id);
2231         if (!targetRange->covers(entryOf(successor))) {
2232           break;
2233         }
2234 
2235         BitSet& live = liveIn[id];
2236         if (!live.contains(i)) {
2237           continue;
2238         }
2239 
2240         for (size_t j = 0; j < successor->mir()->numPredecessors(); j++) {
2241           LBlock* predecessor = successor->mir()->getPredecessor(j)->lir();
2242           if (targetRange->covers(exitOf(predecessor))) {
2243             continue;
2244           }
2245 
2246           if (!alloc().ensureBallast()) {
2247             return false;
2248           }
2249           JitSpew(JitSpew_RegAlloc, "    (moveAtEdge#2)");
2250           LiveRange* from = reg.rangeFor(exitOf(predecessor), true);
2251           if (!moveAtEdge(predecessor, successor, from, targetRange,
2252                           reg.type())) {
2253             return false;
2254           }
2255         }
2256       }
2257     }
2258   }
2259 
2260   JitSpew(JitSpew_RegAlloc, "ResolveControlFlow: end");
2261   return true;
2262 }
2263 
2264 bool BacktrackingAllocator::isReusedInput(LUse* use, LNode* ins,
2265                                           bool considerCopy) {
2266   if (LDefinition* def = FindReusingDefOrTemp(ins, use)) {
2267     return considerCopy || !vregs[def->virtualRegister()].mustCopyInput();
2268   }
2269   return false;
2270 }
2271 
2272 bool BacktrackingAllocator::isRegisterUse(UsePosition* use, LNode* ins,
2273                                           bool considerCopy) {
2274   switch (use->usePolicy()) {
2275     case LUse::ANY:
2276       return isReusedInput(use->use(), ins, considerCopy);
2277 
2278     case LUse::REGISTER:
2279     case LUse::FIXED:
2280       return true;
2281 
2282     default:
2283       return false;
2284   }
2285 }
2286 
2287 bool BacktrackingAllocator::isRegisterDefinition(LiveRange* range) {
2288   if (!range->hasDefinition()) {
2289     return false;
2290   }
2291 
2292   VirtualRegister& reg = range->vreg();
2293   if (reg.ins()->isPhi()) {
2294     return false;
2295   }
2296 
2297   if (reg.def()->policy() == LDefinition::FIXED &&
2298       !reg.def()->output()->isRegister()) {
2299     return false;
2300   }
2301 
2302   return true;
2303 }
2304 
2305 bool BacktrackingAllocator::reifyAllocations() {
2306   JitSpew(JitSpew_RegAlloc, "Reifying Allocations");
2307 
2308   MOZ_ASSERT(!vregs[0u].hasRanges());
2309   for (size_t i = 1; i < graph.numVirtualRegisters(); i++) {
2310     VirtualRegister& reg = vregs[i];
2311 
2312     if (mir->shouldCancel("Backtracking Reify Allocations (main loop)")) {
2313       return false;
2314     }
2315 
2316     for (LiveRange::RegisterLinkIterator iter = reg.rangesBegin(); iter;
2317          iter++) {
2318       LiveRange* range = LiveRange::get(*iter);
2319 
2320       if (range->hasDefinition()) {
2321         reg.def()->setOutput(range->bundle()->allocation());
2322         if (reg.ins()->recoversInput()) {
2323           LSnapshot* snapshot = reg.ins()->toInstruction()->snapshot();
2324           for (size_t i = 0; i < snapshot->numEntries(); i++) {
2325             LAllocation* entry = snapshot->getEntry(i);
2326             if (entry->isUse() &&
2327                 entry->toUse()->policy() == LUse::RECOVERED_INPUT) {
2328               *entry = *reg.def()->output();
2329             }
2330           }
2331         }
2332       }
2333 
2334       for (UsePositionIterator iter(range->usesBegin()); iter; iter++) {
2335         LAllocation* alloc = iter->use();
2336         *alloc = range->bundle()->allocation();
2337 
2338         // For any uses which feed into MUST_REUSE_INPUT definitions,
2339         // add copies if the use and def have different allocations.
2340         LNode* ins = insData[iter->pos];
2341         if (LDefinition* def = FindReusingDefOrTemp(ins, alloc)) {
2342           LiveRange* outputRange = vreg(def).rangeFor(outputOf(ins));
2343           LAllocation res = outputRange->bundle()->allocation();
2344           LAllocation sourceAlloc = range->bundle()->allocation();
2345 
2346           if (res != *alloc) {
2347             if (!this->alloc().ensureBallast()) {
2348               return false;
2349             }
2350             if (NumReusingDefs(ins->toInstruction()) <= 1) {
2351               LMoveGroup* group = getInputMoveGroup(ins->toInstruction());
2352               if (!group->addAfter(sourceAlloc, res, reg.type())) {
2353                 return false;
2354               }
2355             } else {
2356               LMoveGroup* group = getFixReuseMoveGroup(ins->toInstruction());
2357               if (!group->add(sourceAlloc, res, reg.type())) {
2358                 return false;
2359               }
2360             }
2361             *alloc = res;
2362           }
2363         }
2364       }
2365 
2366       addLiveRegistersForRange(reg, range);
2367     }
2368   }
2369 
2370   graph.setLocalSlotCount(stackSlotAllocator.stackHeight());
2371   return true;
2372 }
2373 
2374 size_t BacktrackingAllocator::findFirstNonCallSafepoint(CodePosition from) {
2375   size_t i = 0;
2376   for (; i < graph.numNonCallSafepoints(); i++) {
2377     const LInstruction* ins = graph.getNonCallSafepoint(i);
2378     if (from <= inputOf(ins)) {
2379       break;
2380     }
2381   }
2382   return i;
2383 }
2384 
2385 void BacktrackingAllocator::addLiveRegistersForRange(VirtualRegister& reg,
2386                                                      LiveRange* range) {
2387   // Fill in the live register sets for all non-call safepoints.
2388   LAllocation a = range->bundle()->allocation();
2389   if (!a.isRegister()) {
2390     return;
2391   }
2392 
2393   // Don't add output registers to the safepoint.
2394   CodePosition start = range->from();
2395   if (range->hasDefinition() && !reg.isTemp()) {
2396 #ifdef CHECK_OSIPOINT_REGISTERS
2397     // We don't add the output register to the safepoint,
2398     // but it still might get added as one of the inputs.
2399     // So eagerly add this reg to the safepoint clobbered registers.
2400     if (reg.ins()->isInstruction()) {
2401       if (LSafepoint* safepoint = reg.ins()->toInstruction()->safepoint()) {
2402         safepoint->addClobberedRegister(a.toRegister());
2403       }
2404     }
2405 #endif
2406     start = start.next();
2407   }
2408 
2409   size_t i = findFirstNonCallSafepoint(start);
2410   for (; i < graph.numNonCallSafepoints(); i++) {
2411     LInstruction* ins = graph.getNonCallSafepoint(i);
2412     CodePosition pos = inputOf(ins);
2413 
2414     // Safepoints are sorted, so we can shortcut out of this loop
2415     // if we go out of range.
2416     if (range->to() <= pos) {
2417       break;
2418     }
2419 
2420     MOZ_ASSERT(range->covers(pos));
2421 
2422     LSafepoint* safepoint = ins->safepoint();
2423     safepoint->addLiveRegister(a.toRegister());
2424 
2425 #ifdef CHECK_OSIPOINT_REGISTERS
2426     if (reg.isTemp()) {
2427       safepoint->addClobberedRegister(a.toRegister());
2428     }
2429 #endif
2430   }
2431 }
2432 
2433 static inline bool IsNunbox(VirtualRegister& reg) {
2434 #ifdef JS_NUNBOX32
2435   return reg.type() == LDefinition::TYPE || reg.type() == LDefinition::PAYLOAD;
2436 #else
2437   return false;
2438 #endif
2439 }
2440 
2441 static inline bool IsSlotsOrElements(VirtualRegister& reg) {
2442   return reg.type() == LDefinition::SLOTS;
2443 }
2444 
2445 static inline bool IsTraceable(VirtualRegister& reg) {
2446   if (reg.type() == LDefinition::OBJECT) {
2447     return true;
2448   }
2449 #ifdef JS_PUNBOX64
2450   if (reg.type() == LDefinition::BOX) {
2451     return true;
2452   }
2453 #endif
2454   if (reg.type() == LDefinition::STACKRESULTS) {
2455     MOZ_ASSERT(reg.def());
2456     const LStackArea* alloc = reg.def()->output()->toStackArea();
2457     for (auto iter = alloc->results(); iter; iter.next()) {
2458       if (iter.isGcPointer()) {
2459         return true;
2460       }
2461     }
2462   }
2463   return false;
2464 }
2465 
2466 size_t BacktrackingAllocator::findFirstSafepoint(CodePosition pos,
2467                                                  size_t startFrom) {
2468   size_t i = startFrom;
2469   for (; i < graph.numSafepoints(); i++) {
2470     LInstruction* ins = graph.getSafepoint(i);
2471     if (pos <= inputOf(ins)) {
2472       break;
2473     }
2474   }
2475   return i;
2476 }
2477 
2478 bool BacktrackingAllocator::populateSafepoints() {
2479   JitSpew(JitSpew_RegAlloc, "Populating Safepoints");
2480 
2481   size_t firstSafepoint = 0;
2482 
2483   MOZ_ASSERT(!vregs[0u].def());
2484   for (uint32_t i = 1; i < graph.numVirtualRegisters(); i++) {
2485     VirtualRegister& reg = vregs[i];
2486 
2487     if (!reg.def() ||
2488         (!IsTraceable(reg) && !IsSlotsOrElements(reg) && !IsNunbox(reg))) {
2489       continue;
2490     }
2491 
2492     firstSafepoint = findFirstSafepoint(inputOf(reg.ins()), firstSafepoint);
2493     if (firstSafepoint >= graph.numSafepoints()) {
2494       break;
2495     }
2496 
2497     for (LiveRange::RegisterLinkIterator iter = reg.rangesBegin(); iter;
2498          iter++) {
2499       LiveRange* range = LiveRange::get(*iter);
2500 
2501       for (size_t j = firstSafepoint; j < graph.numSafepoints(); j++) {
2502         LInstruction* ins = graph.getSafepoint(j);
2503 
2504         if (!range->covers(inputOf(ins))) {
2505           if (inputOf(ins) >= range->to()) {
2506             break;
2507           }
2508           continue;
2509         }
2510 
2511         // Include temps but not instruction outputs. Also make sure
2512         // MUST_REUSE_INPUT is not used with gcthings or nunboxes, or
2513         // we would have to add the input reg to this safepoint.
2514         if (ins == reg.ins() && !reg.isTemp()) {
2515           DebugOnly<LDefinition*> def = reg.def();
2516           MOZ_ASSERT_IF(def->policy() == LDefinition::MUST_REUSE_INPUT,
2517                         def->type() == LDefinition::GENERAL ||
2518                             def->type() == LDefinition::INT32 ||
2519                             def->type() == LDefinition::FLOAT32 ||
2520                             def->type() == LDefinition::DOUBLE ||
2521                             def->type() == LDefinition::SIMD128);
2522           continue;
2523         }
2524 
2525         LSafepoint* safepoint = ins->safepoint();
2526 
2527         LAllocation a = range->bundle()->allocation();
2528         if (a.isGeneralReg() && ins->isCall()) {
2529           continue;
2530         }
2531 
2532         switch (reg.type()) {
2533           case LDefinition::OBJECT:
2534             if (!safepoint->addGcPointer(a)) {
2535               return false;
2536             }
2537             break;
2538           case LDefinition::SLOTS:
2539             if (!safepoint->addSlotsOrElementsPointer(a)) {
2540               return false;
2541             }
2542             break;
2543           case LDefinition::STACKRESULTS: {
2544             MOZ_ASSERT(a.isStackArea());
2545             for (auto iter = a.toStackArea()->results(); iter; iter.next()) {
2546               if (iter.isGcPointer()) {
2547                 if (!safepoint->addGcPointer(iter.alloc())) {
2548                   return false;
2549                 }
2550               }
2551             }
2552             break;
2553           }
2554 #ifdef JS_NUNBOX32
2555           case LDefinition::TYPE:
2556             if (!safepoint->addNunboxType(i, a)) {
2557               return false;
2558             }
2559             break;
2560           case LDefinition::PAYLOAD:
2561             if (!safepoint->addNunboxPayload(i, a)) {
2562               return false;
2563             }
2564             break;
2565 #else
2566           case LDefinition::BOX:
2567             if (!safepoint->addBoxedValue(a)) {
2568               return false;
2569             }
2570             break;
2571 #endif
2572           default:
2573             MOZ_CRASH("Bad register type");
2574         }
2575       }
2576     }
2577   }
2578 
2579   return true;
2580 }
2581 
2582 bool BacktrackingAllocator::annotateMoveGroups() {
2583   // Annotate move groups in the LIR graph with any register that is not
2584   // allocated at that point and can be used as a scratch register. This is
2585   // only required for x86, as other platforms always have scratch registers
2586   // available for use.
2587 #ifdef JS_CODEGEN_X86
2588   LiveRange* range = LiveRange::FallibleNew(alloc(), nullptr, CodePosition(),
2589                                             CodePosition().next());
2590   if (!range) {
2591     return false;
2592   }
2593 
2594   for (size_t i = 0; i < graph.numBlocks(); i++) {
2595     if (mir->shouldCancel("Backtracking Annotate Move Groups")) {
2596       return false;
2597     }
2598 
2599     LBlock* block = graph.getBlock(i);
2600     LInstruction* last = nullptr;
2601     for (LInstructionIterator iter = block->begin(); iter != block->end();
2602          ++iter) {
2603       if (iter->isMoveGroup()) {
2604         CodePosition from = last ? outputOf(last) : entryOf(block);
2605         range->setTo(from.next());
2606         range->setFrom(from);
2607 
2608         for (size_t i = 0; i < AnyRegister::Total; i++) {
2609           PhysicalRegister& reg = registers[i];
2610           if (reg.reg.isFloat() || !reg.allocatable) {
2611             continue;
2612           }
2613 
2614           // This register is unavailable for use if (a) it is in use
2615           // by some live range immediately before the move group,
2616           // or (b) it is an operand in one of the group's moves. The
2617           // latter case handles live ranges which end immediately
2618           // before the move group or start immediately after.
2619           // For (b) we need to consider move groups immediately
2620           // preceding or following this one.
2621 
2622           if (iter->toMoveGroup()->uses(reg.reg.gpr())) {
2623             continue;
2624           }
2625           bool found = false;
2626           LInstructionIterator niter(iter);
2627           for (niter++; niter != block->end(); niter++) {
2628             if (niter->isMoveGroup()) {
2629               if (niter->toMoveGroup()->uses(reg.reg.gpr())) {
2630                 found = true;
2631                 break;
2632               }
2633             } else {
2634               break;
2635             }
2636           }
2637           if (iter != block->begin()) {
2638             LInstructionIterator riter(iter);
2639             do {
2640               riter--;
2641               if (riter->isMoveGroup()) {
2642                 if (riter->toMoveGroup()->uses(reg.reg.gpr())) {
2643                   found = true;
2644                   break;
2645                 }
2646               } else {
2647                 break;
2648               }
2649             } while (riter != block->begin());
2650           }
2651 
2652           LiveRange* existing;
2653           if (found || reg.allocations.contains(range, &existing)) {
2654             continue;
2655           }
2656 
2657           iter->toMoveGroup()->setScratchRegister(reg.reg.gpr());
2658           break;
2659         }
2660       } else {
2661         last = *iter;
2662       }
2663     }
2664   }
2665 #endif
2666 
2667   return true;
2668 }
2669 
2670 /////////////////////////////////////////////////////////////////////
2671 // Debugging methods
2672 /////////////////////////////////////////////////////////////////////
2673 
2674 #ifdef JS_JITSPEW
2675 
2676 UniqueChars LiveRange::toString() const {
2677   AutoEnterOOMUnsafeRegion oomUnsafe;
2678 
2679   UniqueChars buf = JS_smprintf("v%u %u-%u", hasVreg() ? vreg().vreg() : 0,
2680                                 from().bits(), to().bits() - 1);
2681 
2682   if (buf && bundle() && !bundle()->allocation().isBogus()) {
2683     buf = JS_sprintf_append(std::move(buf), " %s",
2684                             bundle()->allocation().toString().get());
2685   }
2686 
2687   buf = JS_sprintf_append(std::move(buf), " {");
2688 
2689   if (buf && hasDefinition()) {
2690     buf = JS_sprintf_append(std::move(buf), " %u_def", from().bits());
2691     if (hasVreg()) {
2692       // If the definition has a fixed requirement, print it too.
2693       const LDefinition* def = vreg().def();
2694       LDefinition::Policy policy = def->policy();
2695       if (policy == LDefinition::FIXED || policy == LDefinition::STACK) {
2696         if (buf) {
2697           buf = JS_sprintf_append(std::move(buf), ":F:%s",
2698                                   def->output()->toString().get());
2699         }
2700       }
2701     }
2702   }
2703 
2704   for (UsePositionIterator iter = usesBegin(); buf && iter; iter++) {
2705     buf = JS_sprintf_append(std::move(buf), " %u_%s", iter->pos.bits(),
2706                             iter->use()->toString().get());
2707   }
2708 
2709   buf = JS_sprintf_append(std::move(buf), " }");
2710 
2711   if (!buf) {
2712     oomUnsafe.crash("LiveRange::toString()");
2713   }
2714 
2715   return buf;
2716 }
2717 
2718 UniqueChars LiveBundle::toString() const {
2719   AutoEnterOOMUnsafeRegion oomUnsafe;
2720 
2721   UniqueChars buf = JS_smprintf("LB%u(", debugId());
2722 
2723   if (buf) {
2724     if (spillParent()) {
2725       buf = JS_sprintf_append(std::move(buf), "parent=LB%u",
2726                               spillParent()->debugId());
2727     } else {
2728       buf = JS_sprintf_append(std::move(buf), "parent=none");
2729     }
2730   }
2731 
2732   for (LiveRange::BundleLinkIterator iter = rangesBegin(); buf && iter;
2733        iter++) {
2734     if (buf) {
2735       buf = JS_sprintf_append(std::move(buf), "%s %s",
2736                               (iter == rangesBegin()) ? "" : " ##",
2737                               LiveRange::get(*iter)->toString().get());
2738     }
2739   }
2740 
2741   if (buf) {
2742     buf = JS_sprintf_append(std::move(buf), ")");
2743   }
2744 
2745   if (!buf) {
2746     oomUnsafe.crash("LiveBundle::toString()");
2747   }
2748 
2749   return buf;
2750 }
2751 
2752 #endif  // JS_JITSPEW
2753 
2754 void BacktrackingAllocator::dumpLiveRangesByVReg(const char* who) {
2755 #ifdef JS_JITSPEW
2756   MOZ_ASSERT(!vregs[0u].hasRanges());
2757 
2758   JitSpewCont(JitSpew_RegAlloc, "\n");
2759   JitSpew(JitSpew_RegAlloc, "Live ranges by virtual register (%s):", who);
2760 
2761   for (uint32_t i = 1; i < graph.numVirtualRegisters(); i++) {
2762     JitSpewHeader(JitSpew_RegAlloc);
2763     JitSpewCont(JitSpew_RegAlloc, "  ");
2764     VirtualRegister& reg = vregs[i];
2765     for (LiveRange::RegisterLinkIterator iter = reg.rangesBegin(); iter;
2766          iter++) {
2767       if (iter != reg.rangesBegin()) {
2768         JitSpewCont(JitSpew_RegAlloc, " ## ");
2769       }
2770       JitSpewCont(JitSpew_RegAlloc, "%s",
2771                   LiveRange::get(*iter)->toString().get());
2772     }
2773     JitSpewCont(JitSpew_RegAlloc, "\n");
2774   }
2775 #endif
2776 }
2777 
2778 void BacktrackingAllocator::dumpLiveRangesByBundle(const char* who) {
2779 #ifdef JS_JITSPEW
2780   MOZ_ASSERT(!vregs[0u].hasRanges());
2781 
2782   JitSpewCont(JitSpew_RegAlloc, "\n");
2783   JitSpew(JitSpew_RegAlloc, "Live ranges by bundle (%s):", who);
2784 
2785   for (uint32_t i = 1; i < graph.numVirtualRegisters(); i++) {
2786     VirtualRegister& reg = vregs[i];
2787     for (LiveRange::RegisterLinkIterator baseIter = reg.rangesBegin(); baseIter;
2788          baseIter++) {
2789       LiveRange* range = LiveRange::get(*baseIter);
2790       LiveBundle* bundle = range->bundle();
2791       if (range == bundle->firstRange()) {
2792         JitSpew(JitSpew_RegAlloc, "  %s", bundle->toString().get());
2793       }
2794     }
2795   }
2796 #endif
2797 }
2798 
2799 #ifdef JS_JITSPEW
2800 struct BacktrackingAllocator::PrintLiveRange {
2801   bool& first_;
2802 
2803   explicit PrintLiveRange(bool& first) : first_(first) {}
2804 
2805   void operator()(const LiveRange* range) {
2806     if (first_) {
2807       first_ = false;
2808     } else {
2809       fprintf(stderr, " /");
2810     }
2811     fprintf(stderr, " %s", range->toString().get());
2812   }
2813 };
2814 #endif
2815 
2816 void BacktrackingAllocator::dumpAllocations() {
2817 #ifdef JS_JITSPEW
2818   JitSpew(JitSpew_RegAlloc, "Allocations:");
2819 
2820   dumpLiveRangesByBundle("in dumpAllocations()");
2821 
2822   JitSpewCont(JitSpew_RegAlloc, "\n");
2823   JitSpew(JitSpew_RegAlloc, "Allocations by physical register:");
2824 
2825   for (size_t i = 0; i < AnyRegister::Total; i++) {
2826     if (registers[i].allocatable && !registers[i].allocations.empty()) {
2827       JitSpewHeader(JitSpew_RegAlloc);
2828       JitSpewCont(JitSpew_RegAlloc, "  %s:", AnyRegister::FromCode(i).name());
2829       bool first = true;
2830       registers[i].allocations.forEach(PrintLiveRange(first));
2831       JitSpewCont(JitSpew_RegAlloc, "\n");
2832     }
2833   }
2834 
2835   JitSpewCont(JitSpew_RegAlloc, "\n");
2836 #endif  // JS_JITSPEW
2837 }
2838 
2839 ///////////////////////////////////////////////////////////////////////////////
2840 // Heuristic Methods
2841 ///////////////////////////////////////////////////////////////////////////////
2842 
2843 size_t BacktrackingAllocator::computePriority(LiveBundle* bundle) {
2844   // The priority of a bundle is its total length, so that longer lived
2845   // bundles will be processed before shorter ones (even if the longer ones
2846   // have a low spill weight). See processBundle().
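  // For example, a bundle with ranges covering positions [5,15) and [20,30)
  // has priority (15 - 5) + (30 - 20) = 20.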
2847   size_t lifetimeTotal = 0;
2848 
2849   for (LiveRange::BundleLinkIterator iter = bundle->rangesBegin(); iter;
2850        iter++) {
2851     LiveRange* range = LiveRange::get(*iter);
2852     lifetimeTotal += range->to() - range->from();
2853   }
2854 
2855   return lifetimeTotal;
2856 }
2857 
2858 bool BacktrackingAllocator::minimalDef(LiveRange* range, LNode* ins) {
2859   // Whether this is a minimal range capturing a definition at ins.
2860   return (range->to() <= minimalDefEnd(ins).next()) &&
2861          ((!ins->isPhi() && range->from() == inputOf(ins)) ||
2862           range->from() == outputOf(ins));
2863 }
2864 
2865 bool BacktrackingAllocator::minimalUse(LiveRange* range, UsePosition* use) {
2866   // Whether this is a minimal range capturing |use|.
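  // That is, the range starts at the instruction's input position and ends at
  // its output position (for used-at-start uses) or just past it (otherwise).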
2867   LNode* ins = insData[use->pos];
2868   return (range->from() == inputOf(ins)) &&
2869          (range->to() ==
2870           (use->use()->usedAtStart() ? outputOf(ins) : outputOf(ins).next()));
2871 }
2872 
2873 bool BacktrackingAllocator::minimalBundle(LiveBundle* bundle, bool* pfixed) {
2874   LiveRange::BundleLinkIterator iter = bundle->rangesBegin();
2875   LiveRange* range = LiveRange::get(*iter);
2876 
2877   if (!range->hasVreg()) {
2878     *pfixed = true;
2879     return true;
2880   }
2881 
2882   // If a bundle contains multiple ranges, splitAtAllRegisterUses will split
2883   // each range into a separate bundle.
2884   if (++iter) {
2885     return false;
2886   }
2887 
2888   if (range->hasDefinition()) {
2889     VirtualRegister& reg = range->vreg();
2890     if (pfixed) {
2891       *pfixed = reg.def()->policy() == LDefinition::FIXED &&
2892                 reg.def()->output()->isRegister();
2893     }
2894     return minimalDef(range, reg.ins());
2895   }
2896 
2897   bool fixed = false, minimal = false, multiple = false;
2898 
2899   for (UsePositionIterator iter = range->usesBegin(); iter; iter++) {
2900     if (iter != range->usesBegin()) {
2901       multiple = true;
2902     }
2903 
2904     switch (iter->usePolicy()) {
2905       case LUse::FIXED:
2906         if (fixed) {
2907           return false;
2908         }
2909         fixed = true;
2910         if (minimalUse(range, *iter)) {
2911           minimal = true;
2912         }
2913         break;
2914 
2915       case LUse::REGISTER:
2916         if (minimalUse(range, *iter)) {
2917           minimal = true;
2918         }
2919         break;
2920 
2921       default:
2922         break;
2923     }
2924   }
2925 
2926   // If a range contains a fixed use and at least one other use,
2927   // splitAtAllRegisterUses will split each use into a different bundle.
2928   if (multiple && fixed) {
2929     minimal = false;
2930   }
2931 
2932   if (pfixed) {
2933     *pfixed = fixed;
2934   }
2935   return minimal;
2936 }
2937 
2938 size_t BacktrackingAllocator::computeSpillWeight(LiveBundle* bundle) {
2939   // Minimal bundles have an extremely high spill weight, to ensure they
2940   // can evict any other bundles and be allocated to a register.
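  // (These constants are chosen to comfortably exceed any weight the use
  // density computation below produces in practice.)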
2941   bool fixed;
2942   if (minimalBundle(bundle, &fixed)) {
2943     return fixed ? 2000000 : 1000000;
2944   }
2945 
2946   size_t usesTotal = 0;
2947   fixed = false;
2948 
2949   for (LiveRange::BundleLinkIterator iter = bundle->rangesBegin(); iter;
2950        iter++) {
2951     LiveRange* range = LiveRange::get(*iter);
2952 
2953     if (range->hasDefinition()) {
2954       VirtualRegister& reg = range->vreg();
2955       if (reg.def()->policy() == LDefinition::FIXED &&
2956           reg.def()->output()->isRegister()) {
2957         usesTotal += 2000;
2958         fixed = true;
2959       } else if (!reg.ins()->isPhi()) {
2960         usesTotal += 2000;
2961       }
2962     }
2963 
2964     usesTotal += range->usesSpillWeight();
2965     if (range->numFixedUses() > 0) {
2966       fixed = true;
2967     }
2968   }
2969 
2970   // Bundles with fixed uses are given a higher spill weight, since they must
2971   // be allocated to a specific register.
2972   if (testbed && fixed) {
2973     usesTotal *= 2;
2974   }
2975 
2976   // Compute spill weight as a use density, lowering the weight for long
2977   // lived bundles with relatively few uses.
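  // For example, a bundle whose definition and uses contribute a combined
  // weight of 3000 over a lifetime of 30 code positions gets spill weight
  // 3000 / 30 = 100.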
2978   size_t lifetimeTotal = computePriority(bundle);
2979   return lifetimeTotal ? usesTotal / lifetimeTotal : 0;
2980 }
2981 
2982 size_t BacktrackingAllocator::maximumSpillWeight(
2983     const LiveBundleVector& bundles) {
2984   size_t maxWeight = 0;
2985   for (size_t i = 0; i < bundles.length(); i++) {
2986     maxWeight = std::max(maxWeight, computeSpillWeight(bundles[i]));
2987   }
2988   return maxWeight;
2989 }
2990 
2991 bool BacktrackingAllocator::trySplitAcrossHotcode(LiveBundle* bundle,
2992                                                   bool* success) {
2993   // If this bundle has portions that are hot and portions that are cold,
2994   // split it at the boundaries between hot and cold code.
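  // (The hotcode set is seeded during initialization, typically with ranges
  // covering loop bodies.)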

  LiveRange* hotRange = nullptr;

  for (LiveRange::BundleLinkIterator iter = bundle->rangesBegin(); iter;
       iter++) {
    LiveRange* range = LiveRange::get(*iter);
    if (hotcode.contains(range, &hotRange)) {
      break;
    }
  }

  // Don't split if there is no hot code in the bundle.
  if (!hotRange) {
    JitSpew(JitSpew_RegAlloc, "  .. bundle does not contain hot code");
    return true;
  }

  // Don't split if there is no cold code in the bundle.
  bool coldCode = false;
  for (LiveRange::BundleLinkIterator iter = bundle->rangesBegin(); iter;
       iter++) {
    LiveRange* range = LiveRange::get(*iter);
    if (!hotRange->contains(range)) {
      coldCode = true;
      break;
    }
  }
  if (!coldCode) {
    JitSpew(JitSpew_RegAlloc, "  .. bundle does not contain cold code");
    return true;
  }

  JitSpewIfEnabled(JitSpew_RegAlloc, "  .. split across hot range %s",
                   hotRange->toString().get());

  // Tweak the splitting method when compiling wasm code to look at actual
  // uses within the hot/cold code. This heuristic is in place because the
  // mechanism below regresses several asm.js tests. Hopefully this will be
  // fixed soon and this special case removed. See bug 948838.
  if (compilingWasm()) {
    SplitPositionVector splitPositions;
    if (!splitPositions.append(hotRange->from()) ||
        !splitPositions.append(hotRange->to())) {
      return false;
    }
    *success = true;
    return splitAt(bundle, splitPositions);
  }

  LiveBundle* hotBundle = LiveBundle::FallibleNew(alloc(), bundle->spillSet(),
                                                  bundle->spillParent());
  if (!hotBundle) {
    return false;
  }
  LiveBundle* preBundle = nullptr;
  LiveBundle* postBundle = nullptr;
  LiveBundle* coldBundle = nullptr;

  if (testbed) {
    coldBundle = LiveBundle::FallibleNew(alloc(), bundle->spillSet(),
                                         bundle->spillParent());
    if (!coldBundle) {
      return false;
    }
  }

  // Accumulate the ranges of hot and cold code in the bundle. Note that
  // we are only comparing with the single hot range found, so the cold code
  // may contain separate hot ranges.
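  //
  // For each range, intersect() partitions it against the hot range into the
  // piece before it (coldPre), the overlapping piece (hot), and the piece
  // after it (coldPost); empty pieces are simply skipped below.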
  for (LiveRange::BundleLinkIterator iter = bundle->rangesBegin(); iter;
       iter++) {
    LiveRange* range = LiveRange::get(*iter);
    LiveRange::Range hot, coldPre, coldPost;
    range->intersect(hotRange, &coldPre, &hot, &coldPost);

    if (!hot.empty()) {
      if (!hotBundle->addRangeAndDistributeUses(alloc(), range, hot.from,
                                                hot.to)) {
        return false;
      }
    }

    if (!coldPre.empty()) {
      if (testbed) {
        if (!coldBundle->addRangeAndDistributeUses(alloc(), range, coldPre.from,
                                                   coldPre.to)) {
          return false;
        }
      } else {
        if (!preBundle) {
          preBundle = LiveBundle::FallibleNew(alloc(), bundle->spillSet(),
                                              bundle->spillParent());
          if (!preBundle) {
            return false;
          }
        }
        if (!preBundle->addRangeAndDistributeUses(alloc(), range, coldPre.from,
                                                  coldPre.to)) {
          return false;
        }
      }
    }

    if (!coldPost.empty()) {
      if (testbed) {
        if (!coldBundle->addRangeAndDistributeUses(
                alloc(), range, coldPost.from, coldPost.to)) {
          return false;
        }
      } else {
        if (!postBundle) {
          postBundle = LiveBundle::FallibleNew(alloc(), bundle->spillSet(),
                                               bundle->spillParent());
          if (!postBundle) {
            return false;
          }
        }
        if (!postBundle->addRangeAndDistributeUses(
                alloc(), range, coldPost.from, coldPost.to)) {
          return false;
        }
      }
    }
  }

  MOZ_ASSERT(hotBundle->numRanges() != 0);

  LiveBundleVector newBundles;
  if (!newBundles.append(hotBundle)) {
    return false;
  }

  if (testbed) {
    MOZ_ASSERT(coldBundle->numRanges() != 0);
    if (!newBundles.append(coldBundle)) {
      return false;
    }
  } else {
    MOZ_ASSERT(preBundle || postBundle);
    if (preBundle && !newBundles.append(preBundle)) {
      return false;
    }
    if (postBundle && !newBundles.append(postBundle)) {
      return false;
    }
  }

  *success = true;
  return splitAndRequeueBundles(bundle, newBundles);
}

bool BacktrackingAllocator::trySplitAfterLastRegisterUse(LiveBundle* bundle,
                                                         LiveBundle* conflict,
                                                         bool* success) {
  // If this bundle's later uses do not require it to be in a register,
  // split it after the last use which does require a register. If conflict
  // is specified, only consider register uses before the conflict starts.
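  //
  // Sketch (positions increase to the right, R = use requiring a register,
  // a = use satisfied by any location):
  //
  //   uses:   R    R    R    a    a
  //                      ^-- lastRegisterTo: split here, so the trailing
  //                          uses can be satisfied from the spill location.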

  CodePosition lastRegisterFrom, lastRegisterTo, lastUse;

  for (LiveRange::BundleLinkIterator iter = bundle->rangesBegin(); iter;
       iter++) {
    LiveRange* range = LiveRange::get(*iter);

    // If the range defines a register, consider that a register use for
    // our purposes here.
    if (isRegisterDefinition(range)) {
      CodePosition spillStart = minimalDefEnd(insData[range->from()]).next();
      if (!conflict || spillStart < conflict->firstRange()->from()) {
        lastUse = lastRegisterFrom = range->from();
        lastRegisterTo = spillStart;
      }
    }

    for (UsePositionIterator iter(range->usesBegin()); iter; iter++) {
      LNode* ins = insData[iter->pos];

      // Uses in the bundle should be sorted.
      MOZ_ASSERT(iter->pos >= lastUse);
      lastUse = inputOf(ins);

      if (!conflict || outputOf(ins) < conflict->firstRange()->from()) {
        if (isRegisterUse(*iter, ins, /* considerCopy = */ true)) {
          lastRegisterFrom = inputOf(ins);
          lastRegisterTo = iter->pos.next();
        }
      }
    }
  }

  // Can't trim non-register uses off the end by splitting.
  if (!lastRegisterFrom.bits()) {
    JitSpew(JitSpew_RegAlloc, "  .. bundle has no register uses");
    return true;
  }
  if (lastUse < lastRegisterTo) {
    JitSpew(JitSpew_RegAlloc, "  .. bundle's last use is a register use");
    return true;
  }

  JitSpewIfEnabled(JitSpew_RegAlloc, "  .. split after last register use at %u",
                   lastRegisterTo.bits());

  SplitPositionVector splitPositions;
  if (!splitPositions.append(lastRegisterTo)) {
    return false;
  }
  *success = true;
  return splitAt(bundle, splitPositions);
}

bool BacktrackingAllocator::trySplitBeforeFirstRegisterUse(LiveBundle* bundle,
                                                           LiveBundle* conflict,
                                                           bool* success) {
  // If this bundle's earlier uses do not require it to be in a register,
  // split it before the first use which does require a register. If conflict
  // is specified, only consider register uses after the conflict ends.
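  //
  // This is the mirror image of trySplitAfterLastRegisterUse: leading uses
  // that do not need a register stay with the (spillable) front part, and the
  // split lands just before the first use or definition that does need one.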

  if (isRegisterDefinition(bundle->firstRange())) {
    JitSpew(JitSpew_RegAlloc, "  .. bundle is defined by a register");
    return true;
  }
  if (!bundle->firstRange()->hasDefinition()) {
    JitSpew(JitSpew_RegAlloc, "  .. bundle does not have a definition");
    return true;
  }

  CodePosition firstRegisterFrom;

  CodePosition conflictEnd;
  if (conflict) {
    for (LiveRange::BundleLinkIterator iter = conflict->rangesBegin(); iter;
         iter++) {
      LiveRange* range = LiveRange::get(*iter);
      if (range->to() > conflictEnd) {
        conflictEnd = range->to();
      }
    }
  }

  for (LiveRange::BundleLinkIterator iter = bundle->rangesBegin(); iter;
       iter++) {
    LiveRange* range = LiveRange::get(*iter);

    if (!conflict || range->from() > conflictEnd) {
      if (range->hasDefinition() && isRegisterDefinition(range)) {
        firstRegisterFrom = range->from();
        break;
      }
    }

    for (UsePositionIterator iter(range->usesBegin()); iter; iter++) {
      LNode* ins = insData[iter->pos];

      if (!conflict || outputOf(ins) >= conflictEnd) {
        if (isRegisterUse(*iter, ins, /* considerCopy = */ true)) {
          firstRegisterFrom = inputOf(ins);
          break;
        }
      }
    }
    if (firstRegisterFrom.bits()) {
      break;
    }
  }

  if (!firstRegisterFrom.bits()) {
    // Can't trim non-register uses off the beginning by splitting.
    JitSpew(JitSpew_RegAlloc, "  .. bundle has no register uses");
    return true;
  }

  JitSpewIfEnabled(JitSpew_RegAlloc,
                   "  .. split before first register use at %u",
                   firstRegisterFrom.bits());

  SplitPositionVector splitPositions;
  if (!splitPositions.append(firstRegisterFrom)) {
    return false;
  }
  *success = true;
  return splitAt(bundle, splitPositions);
}

// When splitting a bundle according to a list of split positions, return
// whether a use or range at |pos| should use a different bundle than the last
// position this was called for.
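//
// For example, with splitPositions == {p1, p2} and calls made at increasing
// positions a < p1 <= b < c < p2 <= d, the results are: a -> false,
// b -> true (advances past p1), c -> false, d -> true (advances past p2),
// and every later call -> false.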
static bool UseNewBundle(const SplitPositionVector& splitPositions,
                         CodePosition pos, size_t* activeSplitPosition) {
  if (splitPositions.empty()) {
    // When the split positions are empty, we are splitting at all uses.
    return true;
  }

  if (*activeSplitPosition == splitPositions.length()) {
    // We've advanced past all split positions.
    return false;
  }

  if (splitPositions[*activeSplitPosition] > pos) {
    // We haven't gotten to the next split position yet.
    return false;
  }

  // We've advanced past the next split position; find the next one we
  // should split at.
  while (*activeSplitPosition < splitPositions.length() &&
         splitPositions[*activeSplitPosition] <= pos) {
    (*activeSplitPosition)++;
  }
  return true;
}

static bool HasPrecedingRangeSharingVreg(LiveBundle* bundle, LiveRange* range) {
  MOZ_ASSERT(range->bundle() == bundle);

  for (LiveRange::BundleLinkIterator iter = bundle->rangesBegin(); iter;
       iter++) {
    LiveRange* prevRange = LiveRange::get(*iter);
    if (prevRange == range) {
      return false;
    }
    if (&prevRange->vreg() == &range->vreg()) {
      return true;
    }
  }

  MOZ_CRASH();
}

static bool HasFollowingRangeSharingVreg(LiveBundle* bundle, LiveRange* range) {
  MOZ_ASSERT(range->bundle() == bundle);

  bool foundRange = false;
  for (LiveRange::BundleLinkIterator iter = bundle->rangesBegin(); iter;
       iter++) {
    LiveRange* prevRange = LiveRange::get(*iter);
    if (foundRange && &prevRange->vreg() == &range->vreg()) {
      return true;
    }
    if (prevRange == range) {
      foundRange = true;
    }
  }

  MOZ_ASSERT(foundRange);
  return false;
}

bool BacktrackingAllocator::splitAt(LiveBundle* bundle,
                                    const SplitPositionVector& splitPositions) {
  // Split the bundle at the given split points. Register uses which have no
  // intervening split points are consolidated into the same bundle. If the
  // list of split points is empty, then all register uses are placed in
  // minimal bundles.
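  //
  // The procedure below works in three steps:
  //  1. Reuse the existing spill bundle, or create one covering the
  //     post-definition parts of every range.
  //  2. Walk the ranges, starting a new bundle whenever a split position is
  //     crossed, and hand each use either to the active bundle or to the
  //     spill bundle.
  //  3. Trim the new bundles' ranges down to their uses and drop any bundle
  //     that ends up empty.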

  // splitPositions should be sorted.
  for (size_t i = 1; i < splitPositions.length(); ++i) {
    MOZ_ASSERT(splitPositions[i - 1] < splitPositions[i]);
  }

  // We don't need to create a new spill bundle if there already is one.
  bool spillBundleIsNew = false;
  LiveBundle* spillBundle = bundle->spillParent();
  if (!spillBundle) {
    spillBundle = LiveBundle::FallibleNew(alloc(), bundle->spillSet(), nullptr);
    if (!spillBundle) {
      return false;
    }
    spillBundleIsNew = true;

    for (LiveRange::BundleLinkIterator iter = bundle->rangesBegin(); iter;
         iter++) {
      LiveRange* range = LiveRange::get(*iter);

      CodePosition from = range->from();
      if (isRegisterDefinition(range)) {
        from = minimalDefEnd(insData[from]).next();
      }

      if (from < range->to()) {
        if (!spillBundle->addRange(alloc(), &range->vreg(), from,
                                   range->to())) {
          return false;
        }

        if (range->hasDefinition() && !isRegisterDefinition(range)) {
          spillBundle->lastRange()->setHasDefinition();
        }
      }
    }
  }

  LiveBundleVector newBundles;

  // The bundle to which ranges are currently being added.
  LiveBundle* activeBundle =
      LiveBundle::FallibleNew(alloc(), bundle->spillSet(), spillBundle);
  if (!activeBundle || !newBundles.append(activeBundle)) {
    return false;
  }

  // State for use by UseNewBundle.
  size_t activeSplitPosition = 0;

  // Make new bundles according to the split positions, and distribute ranges
  // and uses to them.
  for (LiveRange::BundleLinkIterator iter = bundle->rangesBegin(); iter;
       iter++) {
    LiveRange* range = LiveRange::get(*iter);

    if (UseNewBundle(splitPositions, range->from(), &activeSplitPosition)) {
      activeBundle =
          LiveBundle::FallibleNew(alloc(), bundle->spillSet(), spillBundle);
      if (!activeBundle || !newBundles.append(activeBundle)) {
        return false;
      }
    }

    LiveRange* activeRange = LiveRange::FallibleNew(alloc(), &range->vreg(),
                                                    range->from(), range->to());
    if (!activeRange) {
      return false;
    }
    activeBundle->addRange(activeRange);

    if (isRegisterDefinition(range)) {
      activeRange->setHasDefinition();
    }

    while (range->hasUses()) {
      UsePosition* use = range->popUse();
      LNode* ins = insData[use->pos];

      // Any uses of a register that appear before its definition has
      // finished must be associated with the range for that definition.
      if (isRegisterDefinition(range) &&
          use->pos <= minimalDefEnd(insData[range->from()])) {
        activeRange->addUse(use);
      } else if (isRegisterUse(use, ins)) {
        // Place this register use into a different bundle from the
        // last one if there are any split points between the two uses.
        // UseNewBundle always returns true if we are splitting at all
        // register uses, but we can still reuse the last range and
        // bundle if they have uses at the same position, except when
        // either use is fixed (the two uses might require incompatible
        // registers).
        if (UseNewBundle(splitPositions, use->pos, &activeSplitPosition) &&
            (!activeRange->hasUses() ||
             activeRange->usesBegin()->pos != use->pos ||
             activeRange->usesBegin()->usePolicy() == LUse::FIXED ||
             use->usePolicy() == LUse::FIXED)) {
          activeBundle =
              LiveBundle::FallibleNew(alloc(), bundle->spillSet(), spillBundle);
          if (!activeBundle || !newBundles.append(activeBundle)) {
            return false;
          }
          activeRange = LiveRange::FallibleNew(alloc(), &range->vreg(),
                                               range->from(), range->to());
          if (!activeRange) {
            return false;
          }
          activeBundle->addRange(activeRange);
        }

        activeRange->addUse(use);
      } else {
        MOZ_ASSERT(spillBundleIsNew);
        spillBundle->rangeFor(use->pos)->addUse(use);
      }
    }
  }

  LiveBundleVector filteredBundles;

  // Trim the ends of ranges in each new bundle when there are no other
  // earlier or later ranges in the same bundle with the same vreg.
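  // In that case, a range with no definition can have its start pulled
  // forward to its first use, and any range can have its end pulled back to
  // just past its last use (or past the end of its definition); ranges left
  // with neither uses nor a definition are removed entirely.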
  for (size_t i = 0; i < newBundles.length(); i++) {
    LiveBundle* bundle = newBundles[i];

    for (LiveRange::BundleLinkIterator iter = bundle->rangesBegin(); iter;) {
      LiveRange* range = LiveRange::get(*iter);

      if (!range->hasDefinition()) {
        if (!HasPrecedingRangeSharingVreg(bundle, range)) {
          if (range->hasUses()) {
            UsePosition* use = *range->usesBegin();
            range->setFrom(inputOf(insData[use->pos]));
          } else {
            bundle->removeRangeAndIncrementIterator(iter);
            continue;
          }
        }
      }

      if (!HasFollowingRangeSharingVreg(bundle, range)) {
        if (range->hasUses()) {
          UsePosition* use = range->lastUse();
          range->setTo(use->pos.next());
        } else if (range->hasDefinition()) {
          range->setTo(minimalDefEnd(insData[range->from()]).next());
        } else {
          bundle->removeRangeAndIncrementIterator(iter);
          continue;
        }
      }

      iter++;
    }

    if (bundle->hasRanges() && !filteredBundles.append(bundle)) {
      return false;
    }
  }

  if (spillBundleIsNew && !filteredBundles.append(spillBundle)) {
    return false;
  }

  return splitAndRequeueBundles(bundle, filteredBundles);
}

bool BacktrackingAllocator::splitAcrossCalls(LiveBundle* bundle) {
  // Split the bundle to separate register uses and non-register uses and
  // allow the vreg to be spilled across its range.
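  //
  // Calls clobber the allocatable registers, so a piece of the bundle that
  // spans a call cannot stay in a register across it. Splitting at each call
  // position lets the pieces between calls still compete for registers while
  // the spill bundle carries the value across the calls themselves.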

  // Find the locations of all calls in the bundle's range.
  SplitPositionVector callPositions;
  for (LiveRange::BundleLinkIterator iter = bundle->rangesBegin(); iter;
       iter++) {
    LiveRange* range = LiveRange::get(*iter);
    CallRange searchRange(range->from(), range->to());
    CallRange* callRange;
    if (!callRanges.contains(&searchRange, &callRange)) {
      // There are no calls inside this range.
      continue;
    }
    MOZ_ASSERT(range->covers(callRange->range.from));

    // The search above returns an arbitrary call within the range. Walk
    // backwards to find the first call in the range.
    for (CallRangeList::reverse_iterator riter =
             callRangesList.rbegin(callRange);
         riter != callRangesList.rend(); ++riter) {
      CodePosition pos = riter->range.from;
      if (range->covers(pos)) {
        callRange = *riter;
      } else {
        break;
      }
    }

    // Add all call positions within the range, by walking forwards.
    for (CallRangeList::iterator iter = callRangesList.begin(callRange);
         iter != callRangesList.end(); ++iter) {
      CodePosition pos = iter->range.from;
      if (!range->covers(pos)) {
        break;
      }

      // Calls at the beginning of the range are ignored; there is no splitting
      // to do.
      if (range->covers(pos.previous())) {
        MOZ_ASSERT_IF(callPositions.length(), pos > callPositions.back());
        if (!callPositions.append(pos)) {
          return false;
        }
      }
    }
  }
  MOZ_ASSERT(callPositions.length());

#ifdef JS_JITSPEW
  JitSpewStart(JitSpew_RegAlloc, "  .. split across calls at ");
  for (size_t i = 0; i < callPositions.length(); ++i) {
    JitSpewCont(JitSpew_RegAlloc, "%s%u", i != 0 ? ", " : "",
                callPositions[i].bits());
  }
  JitSpewFin(JitSpew_RegAlloc);
#endif

  return splitAt(bundle, callPositions);
}

bool BacktrackingAllocator::chooseBundleSplit(LiveBundle* bundle, bool fixed,
                                              LiveBundle* conflict) {
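  // Try the splitting heuristics from most to least targeted: hot/cold code
  // boundaries first, then, if |fixed| is set, call boundaries, then trimming
  // non-register uses off the front or the back, and finally the fallback of
  // splitting at every register use.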
  bool success = false;

  JitSpew(JitSpew_RegAlloc, "  Splitting %s ..", bundle->toString().get());

  if (!trySplitAcrossHotcode(bundle, &success)) {
    return false;
  }
  if (success) {
    return true;
  }

  if (fixed) {
    return splitAcrossCalls(bundle);
  }

  if (!trySplitBeforeFirstRegisterUse(bundle, conflict, &success)) {
    return false;
  }
  if (success) {
    return true;
  }

  if (!trySplitAfterLastRegisterUse(bundle, conflict, &success)) {
    return false;
  }
  if (success) {
    return true;
  }

  // Split at all register uses.
  SplitPositionVector emptyPositions;
  return splitAt(bundle, emptyPositions);
}
