/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
 * vim: set ts=8 sts=2 et sw=2 tw=80:
 * This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this
 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */

#ifndef jit_BacktrackingAllocator_h
#define jit_BacktrackingAllocator_h

#include "mozilla/Array.h"
#include "mozilla/Attributes.h"

#include "ds/PriorityQueue.h"
#include "ds/SplayTree.h"
#include "jit/RegisterAllocator.h"
#include "jit/StackSlotAllocator.h"

// Gives better traces in Nightly/debug builds (could be EARLY_BETA_OR_EARLIER)
#if defined(NIGHTLY_BUILD) || defined(DEBUG)
#  define AVOID_INLINE_FOR_DEBUGGING MOZ_NEVER_INLINE
#else
#  define AVOID_INLINE_FOR_DEBUGGING
#endif

// Backtracking, priority-queue-based register allocator, modeled on the one
// described in the following blog post:
//
// http://blog.llvm.org/2011/09/greedy-register-allocation-in-llvm-30.html

namespace js {
namespace jit {

class Requirement {
 public:
  enum Kind { NONE, REGISTER, FIXED };

  Requirement() : kind_(NONE) {}

  explicit Requirement(Kind kind) : kind_(kind) {
    // FIXED has a dedicated constructor.
    MOZ_ASSERT(kind != FIXED);
  }

  explicit Requirement(LAllocation fixed) : kind_(FIXED), allocation_(fixed) {
    MOZ_ASSERT(!fixed.isBogus() && !fixed.isUse());
  }

  Kind kind() const { return kind_; }

  LAllocation allocation() const {
    MOZ_ASSERT(!allocation_.isBogus() && !allocation_.isUse());
    return allocation_;
  }

  [[nodiscard]] bool merge(const Requirement& newRequirement) {
    // Merge newRequirement with any existing requirement, returning false
    // if the new and old requirements conflict.

    if (newRequirement.kind() == Requirement::FIXED) {
      if (kind() == Requirement::FIXED) {
        return newRequirement.allocation() == allocation();
      }
      *this = newRequirement;
      return true;
    }

    MOZ_ASSERT(newRequirement.kind() == Requirement::REGISTER);
    if (kind() == Requirement::FIXED) {
      return allocation().isRegister();
    }

    *this = newRequirement;
    return true;
  }

 private:
  Kind kind_;
  LAllocation allocation_;
};
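
// Illustrative sketch, not part of the allocator: merge() only ever tightens
// a requirement. REGISTER merged into NONE or REGISTER gives REGISTER; FIXED
// replaces NONE or REGISTER; two FIXED requirements merge only if they name
// the same allocation; and REGISTER merged into FIXED succeeds only if the
// fixed allocation is a register. For example (fixedAlloc and otherFixedAlloc
// stand for hypothetical LAllocation values):
//
//   Requirement r;                                     // r is NONE
//   bool ok1 = r.merge(Requirement(Requirement::REGISTER));  // now REGISTER
//   bool ok2 = r.merge(Requirement(fixedAlloc));       // now FIXED
//   bool ok3 = r.merge(Requirement(otherFixedAlloc));  // false unless both
//                                                      // allocations match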

struct UsePosition : public TempObject,
                     public InlineForwardListNode<UsePosition> {
 private:
  // A UsePosition is an LUse* with a CodePosition.  UsePosition also has an
  // optimization that allows access to the associated LUse::Policy without
  // dereferencing memory: the policy is encoded in the low bits of the LUse*.
  //
  // Note however that because LUse* is uintptr_t-aligned, on 32-bit systems
  // there are only 4 encodable tag values but more than 4 use policies; in
  // that case we allocate the common LUse::ANY, LUse::REGISTER, and
  // LUse::FIXED use policies to tags, and use tag 0x3 to indicate that
  // dereferencing the LUse is necessary to get the policy (KEEPALIVE or
  // STACK, in that case).
  uintptr_t use_;
  static_assert(LUse::ANY < 0x3,
                "LUse::ANY can be represented in low tag on 32-bit systems");
  static_assert(LUse::REGISTER < 0x3,
                "LUse::REGISTER can be represented in tag on 32-bit systems");
  static_assert(LUse::FIXED < 0x3,
                "LUse::FIXED can be represented in tag on 32-bit systems");

  static constexpr uintptr_t PolicyMask = sizeof(uintptr_t) - 1;
  static constexpr uintptr_t UseMask = ~PolicyMask;
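
  // Illustrative sketch of the tagging scheme (the address is made up): for
  // an LUse* of 0x7ffd1000 with policy LUse::REGISTER, setUse() stores
  //
  //   use_ = uintptr_t(0x7ffd1000) | uintptr_t(LUse::REGISTER);
  //
  // and the accessors recover the two halves as
  //
  //   use()       == reinterpret_cast<LUse*>(use_ & UseMask)
  //   usePolicy() == LUse::Policy(use_ & PolicyMask)
  //
  // On 32-bit systems the all-ones tag (== PolicyMask) instead means "read
  // the policy from the LUse itself".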

  void setUse(LUse* use) {
    // RECOVERED_INPUT is used by snapshots and ignored when building the
    // liveness information. Thus we can safely assume that no such value
    // would be seen.
    MOZ_ASSERT(use->policy() != LUse::RECOVERED_INPUT);

    uintptr_t policyBits = use->policy();
#ifndef JS_64BIT
    // On a 32-bit machine, LUse::KEEPALIVE and LUse::STACK are accessed by
    // dereferencing the use pointer.
    if (policyBits >= PolicyMask) {
      policyBits = PolicyMask;
    }
#endif
    use_ = uintptr_t(use) | policyBits;
    MOZ_ASSERT(use->policy() == usePolicy());
  }

 public:
  CodePosition pos;

  LUse* use() const { return reinterpret_cast<LUse*>(use_ & UseMask); }

  LUse::Policy usePolicy() const {
    uintptr_t bits = use_ & PolicyMask;
#ifndef JS_64BIT
    // On 32-bit machines, reach out to memory if it's LUse::KEEPALIVE or
    // LUse::STACK.
    if (bits == PolicyMask) {
      return use()->policy();
    }
#endif
    LUse::Policy policy = LUse::Policy(bits);
    MOZ_ASSERT(use()->policy() == policy);
    return policy;
  }

  UsePosition(LUse* use, CodePosition pos) : pos(pos) {
    // Verify that the usedAtStart() flag is consistent with the
    // subposition. For now ignore fixed registers, because they
    // are handled specially around calls.
    MOZ_ASSERT_IF(!use->isFixedRegister(),
                  pos.subpos() == (use->usedAtStart() ? CodePosition::INPUT
                                                      : CodePosition::OUTPUT));
    setUse(use);
  }
};

using UsePositionIterator = InlineForwardListIterator<UsePosition>;

// Backtracking allocator data structures overview.
//
// LiveRange: A continuous range of positions where a virtual register is live.
// LiveBundle: A set of LiveRanges which do not overlap.
// VirtualRegister: A set of all LiveRanges used for some LDefinition.
//
// The allocator first performs a liveness analysis on the LIR graph which
// constructs LiveRanges for each VirtualRegister, determining where the
// registers are live.
//
// The ranges are then bundled together according to heuristics, and placed on
// the allocation queue.
//
// As bundles are removed from the allocation queue, we attempt to find a
// physical register or stack slot allocation for all ranges in the removed
// bundle, possibly evicting already-allocated bundles. See processBundle()
// for details.
//
// If we are not able to allocate a bundle, it is split according to heuristics
// into two or more smaller bundles which cover all the ranges of the original.
// These smaller bundles are then allocated independently.

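// Illustrative sketch (not actual allocator state): a virtual register v1
// that is live over positions [2,10) and [20,30) initially gets a single
// bundle holding both ranges. If that bundle cannot be allocated and is
// split, the result might look like:
//
//   VirtualRegister v1
//     LiveRange [2,10)   -> LiveBundle A (allocation: some register)
//     LiveRange [20,30)  -> LiveBundle B (allocation: stack slot)
//
// where A and B share the original bundle's SpillSet, so anything spilled
// from either ends up in the same stack location.
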
class LiveBundle;

class LiveRange : public TempObject {
 public:
  // Linked lists are used to keep track of the ranges in each LiveBundle and
  // VirtualRegister. Since a LiveRange may be in two lists simultaneously, use
  // these auxiliary classes to keep things straight.
  class BundleLink : public InlineForwardListNode<BundleLink> {};
  class RegisterLink : public InlineForwardListNode<RegisterLink> {};

  using BundleLinkIterator = InlineForwardListIterator<BundleLink>;
  using RegisterLinkIterator = InlineForwardListIterator<RegisterLink>;

  // Links in the lists in LiveBundle and VirtualRegister.
  BundleLink bundleLink;
  RegisterLink registerLink;

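  // Recover the enclosing LiveRange from one of its embedded link nodes by
  // subtracting the link member's offset (the container-of idiom).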
  static LiveRange* get(BundleLink* link) {
    return reinterpret_cast<LiveRange*>(reinterpret_cast<uint8_t*>(link) -
                                        offsetof(LiveRange, bundleLink));
  }
  static LiveRange* get(RegisterLink* link) {
    return reinterpret_cast<LiveRange*>(reinterpret_cast<uint8_t*>(link) -
                                        offsetof(LiveRange, registerLink));
  }

  struct Range {
    // The beginning of this range, inclusive.
    CodePosition from;

    // The end of this range, exclusive.
    CodePosition to;

    Range() = default;

    Range(CodePosition from, CodePosition to) : from(from), to(to) {
      MOZ_ASSERT(!empty());
    }

    bool empty() {
      MOZ_ASSERT(from <= to);
      return from == to;
    }
  };

 private:
  // The virtual register this range is for, or zero if this does not have a
  // virtual register (for example, it is in the callRanges bundle).
  uint32_t vreg_;

  // The bundle containing this range, null if liveness information is being
  // constructed and we haven't started allocating bundles yet.
  LiveBundle* bundle_;

  // The code positions in this range.
  Range range_;

  // All uses of the virtual register in this range, ordered by location.
  InlineForwardList<UsePosition> uses_;

  // Total spill weight, calculated from the policies of all uses. Because a
  // use's policy can't be changed after initialization, we can update the
  // weight whenever a use is added to or removed from this range. This way,
  // we don't need to iterate over all the uses every time
  // computeSpillWeight() is called.
  size_t usesSpillWeight_;

  // Number of uses that have policy LUse::FIXED.
  uint32_t numFixedUses_;

  // Whether this range contains the virtual register's definition.
  bool hasDefinition_;

  LiveRange(uint32_t vreg, Range range)
      : vreg_(vreg),
        bundle_(nullptr),
        range_(range),
        usesSpillWeight_(0),
        numFixedUses_(0),
        hasDefinition_(false)

  {
    MOZ_ASSERT(!range.empty());
  }

  void noteAddedUse(UsePosition* use);
  void noteRemovedUse(UsePosition* use);

 public:
  static LiveRange* FallibleNew(TempAllocator& alloc, uint32_t vreg,
                                CodePosition from, CodePosition to) {
    return new (alloc.fallible()) LiveRange(vreg, Range(from, to));
  }

  uint32_t vreg() const {
    MOZ_ASSERT(hasVreg());
    return vreg_;
  }
  bool hasVreg() const { return vreg_ != 0; }

  LiveBundle* bundle() const { return bundle_; }

  CodePosition from() const { return range_.from; }
  CodePosition to() const { return range_.to; }
  bool covers(CodePosition pos) const { return pos >= from() && pos < to(); }

  // Whether this range wholly contains other.
  bool contains(LiveRange* other) const;

  // Intersect this range with other, returning the subranges of this
  // that are before, inside, or after other.
  void intersect(LiveRange* other, Range* pre, Range* inside,
                 Range* post) const;

  // Whether this range has any intersection with other.
  bool intersects(LiveRange* other) const;

  UsePositionIterator usesBegin() const { return uses_.begin(); }
  UsePosition* lastUse() const { return uses_.back(); }
  bool hasUses() const { return !!usesBegin(); }
  UsePosition* popUse();

  bool hasDefinition() const { return hasDefinition_; }

  void setFrom(CodePosition from) {
    range_.from = from;
    MOZ_ASSERT(!range_.empty());
  }
  void setTo(CodePosition to) {
    range_.to = to;
    MOZ_ASSERT(!range_.empty());
  }

  void setBundle(LiveBundle* bundle) { bundle_ = bundle; }

  void addUse(UsePosition* use);
  void distributeUses(LiveRange* other);

  void setHasDefinition() {
    MOZ_ASSERT(!hasDefinition_);
    hasDefinition_ = true;
  }

  size_t usesSpillWeight() { return usesSpillWeight_; }
  uint32_t numFixedUses() { return numFixedUses_; }

#ifdef JS_JITSPEW
  // Return a string describing this range.
  UniqueChars toString() const;
#endif

  // Comparator for use in range splay trees.
  static int compare(LiveRange* v0, LiveRange* v1) {
    // LiveRange includes 'from' but excludes 'to'.
    if (v0->to() <= v1->from()) {
      return -1;
    }
    if (v0->from() >= v1->to()) {
      return 1;
    }
    return 0;
  }
};
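
// Illustrative behavior of LiveRange::compare() on half-open ranges (the
// positions are made up): a range covering [4,8) and one covering [8,12) do
// not overlap, so compare() returns -1 for that ordered pair; [4,8) and
// [6,12) share positions 6 and 7, so compare() returns 0, which the splay
// trees below use to detect conflicting ranges.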

// Tracks information about bundles that should all be spilled to the same
// physical location. At the beginning of allocation, each bundle has its own
// spill set. As bundles are split, the new smaller bundles continue to use the
// same spill set.
class SpillSet : public TempObject {
  // All bundles with this spill set which have been spilled. All bundles in
  // this list will be given the same physical slot.
  Vector<LiveBundle*, 1, JitAllocPolicy> list_;

  explicit SpillSet(TempAllocator& alloc) : list_(alloc) {}

 public:
  static SpillSet* New(TempAllocator& alloc) {
    return new (alloc) SpillSet(alloc);
  }

  [[nodiscard]] bool addSpilledBundle(LiveBundle* bundle) {
    return list_.append(bundle);
  }
  size_t numSpilledBundles() const { return list_.length(); }
  LiveBundle* spilledBundle(size_t i) const { return list_[i]; }

  void setAllocation(LAllocation alloc);
};
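
// Minimal usage sketch with hypothetical caller code (|bundle| and |slot| are
// assumptions, and the real slot selection lives in
// BacktrackingAllocator::pickStackSlot()): a spilled bundle is recorded on
// its spill set, and once a stack slot is chosen the whole set receives the
// same allocation.
//
//   SpillSet* set = bundle->spillSet();
//   if (!set->addSpilledBundle(bundle)) {
//     return false;  // OOM
//   }
//   ...
//   set->setAllocation(LStackSlot(slot));  // every spilled bundle gets |slot|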

// A set of live ranges which are all pairwise disjoint. The register allocator
// attempts to find allocations for an entire bundle, and if it fails the
// bundle will be broken into smaller ones which are allocated independently.
class LiveBundle : public TempObject {
  // Set to use if this bundle or one it is split into is spilled.
  SpillSet* spill_;

  // All the ranges in this set, ordered by location.
  InlineForwardList<LiveRange::BundleLink> ranges_;

  // Allocation to use for ranges in this set, bogus if unallocated or spilled
  // and not yet given a physical stack slot.
  LAllocation alloc_;

  // Bundle which entirely contains this one and has no register uses. This
  // may or may not be spilled by the allocator, but it can be spilled and
  // will not be split.
  LiveBundle* spillParent_;

  LiveBundle(SpillSet* spill, LiveBundle* spillParent)
      : spill_(spill), spillParent_(spillParent) {}

 public:
  static LiveBundle* FallibleNew(TempAllocator& alloc, SpillSet* spill,
                                 LiveBundle* spillParent) {
    return new (alloc.fallible()) LiveBundle(spill, spillParent);
  }

  SpillSet* spillSet() const { return spill_; }
  void setSpillSet(SpillSet* spill) { spill_ = spill; }

  LiveRange::BundleLinkIterator rangesBegin() const { return ranges_.begin(); }
  bool hasRanges() const { return !!rangesBegin(); }
  LiveRange* firstRange() const { return LiveRange::get(*rangesBegin()); }
  LiveRange* lastRange() const { return LiveRange::get(ranges_.back()); }
  LiveRange* rangeFor(CodePosition pos) const;
  void removeRange(LiveRange* range);
  void removeRangeAndIncrementIterator(LiveRange::BundleLinkIterator& iter) {
    ranges_.removeAndIncrement(iter);
  }
  void addRange(LiveRange* range);
  [[nodiscard]] bool addRange(TempAllocator& alloc, uint32_t vreg,
                              CodePosition from, CodePosition to);
  [[nodiscard]] bool addRangeAndDistributeUses(TempAllocator& alloc,
                                               LiveRange* oldRange,
                                               CodePosition from,
                                               CodePosition to);
  LiveRange* popFirstRange();
#ifdef DEBUG
  size_t numRanges() const;
#endif

  LAllocation allocation() const { return alloc_; }
  void setAllocation(LAllocation alloc) { alloc_ = alloc; }

  LiveBundle* spillParent() const { return spillParent_; }

#ifdef JS_JITSPEW
  // Return a string describing this bundle.
  UniqueChars toString() const;
#endif
};

// Information about the allocation for a virtual register.
class VirtualRegister {
  // Instruction which defines this register.
  LNode* ins_ = nullptr;

  // Definition in the instruction for this register.
  LDefinition* def_ = nullptr;

  // All live ranges for this register. These may overlap each other, and are
  // ordered by their start position.
  InlineForwardList<LiveRange::RegisterLink> ranges_;

  // Whether def_ is a temp or an output.
  bool isTemp_ = false;

  // Whether this vreg is an input for some phi. This use is not reflected in
  // any range on the vreg.
  bool usedByPhi_ = false;

  // If this register's definition is MUST_REUSE_INPUT, whether a copy must
  // be introduced before the definition that relaxes the policy.
  bool mustCopyInput_ = false;

  void operator=(const VirtualRegister&) = delete;
  VirtualRegister(const VirtualRegister&) = delete;

 public:
  VirtualRegister() = default;

  void init(LNode* ins, LDefinition* def, bool isTemp) {
    MOZ_ASSERT(!ins_);
    ins_ = ins;
    def_ = def;
    isTemp_ = isTemp;
  }

  LNode* ins() const { return ins_; }
  LDefinition* def() const { return def_; }
  LDefinition::Type type() const { return def()->type(); }
  uint32_t vreg() const { return def()->virtualRegister(); }
  bool isCompatible(const AnyRegister& r) const {
    return def_->isCompatibleReg(r);
  }
  bool isCompatible(const VirtualRegister& vr) const {
    return def_->isCompatibleDef(*vr.def_);
  }
  bool isTemp() const { return isTemp_; }

  void setUsedByPhi() { usedByPhi_ = true; }
  bool usedByPhi() { return usedByPhi_; }

  void setMustCopyInput() { mustCopyInput_ = true; }
  bool mustCopyInput() { return mustCopyInput_; }

  LiveRange::RegisterLinkIterator rangesBegin() const {
    return ranges_.begin();
  }
  LiveRange::RegisterLinkIterator rangesBegin(LiveRange* range) const {
    return ranges_.begin(&range->registerLink);
  }
  bool hasRanges() const { return !!rangesBegin(); }
  LiveRange* firstRange() const { return LiveRange::get(*rangesBegin()); }
  LiveRange* lastRange() const { return LiveRange::get(ranges_.back()); }
  LiveRange* rangeFor(CodePosition pos, bool preferRegister = false) const;
  void removeRange(LiveRange* range);
  void addRange(LiveRange* range);

  void removeRangeAndIncrement(LiveRange::RegisterLinkIterator& iter) {
    ranges_.removeAndIncrement(iter);
  }

  LiveBundle* firstBundle() const { return firstRange()->bundle(); }

  [[nodiscard]] bool addInitialRange(TempAllocator& alloc, CodePosition from,
                                     CodePosition to, size_t* numRanges);
  void addInitialUse(UsePosition* use);
  void setInitialDefinition(CodePosition from);
};

// A sequence of code positions, for telling BacktrackingAllocator::splitAt
// where to split.
using SplitPositionVector = js::Vector<CodePosition, 4, SystemAllocPolicy>;

class BacktrackingAllocator : protected RegisterAllocator {
  friend class JSONSpewer;

  // This flag is set when testing new allocator modifications.
  bool testbed;

  BitSet* liveIn;
  FixedList<VirtualRegister> vregs;

  // Allocation state.
  StackSlotAllocator stackSlotAllocator;

  // Priority queue element: a bundle and the associated priority.
  struct QueueItem {
    LiveBundle* bundle;

    QueueItem(LiveBundle* bundle, size_t priority)
        : bundle(bundle), priority_(priority) {}

    static size_t priority(const QueueItem& v) { return v.priority_; }

   private:
    size_t priority_;
  };

  PriorityQueue<QueueItem, QueueItem, 0, SystemAllocPolicy> allocationQueue;

  using LiveRangeSet = SplayTree<LiveRange*, LiveRange>;

  // Each physical register is associated with the set of ranges over which
  // that register is currently allocated.
  struct PhysicalRegister {
    bool allocatable;
    AnyRegister reg;
    LiveRangeSet allocations;

    PhysicalRegister() : allocatable(false) {}
  };
  mozilla::Array<PhysicalRegister, AnyRegister::Total> registers;

  // Ranges of code which are considered to be hot, for which good allocation
  // should be prioritized.
  LiveRangeSet hotcode;

  struct CallRange : public TempObject, public InlineListNode<CallRange> {
    LiveRange::Range range;

    CallRange(CodePosition from, CodePosition to) : range(from, to) {}

    // Comparator for use in splay tree.
    static int compare(CallRange* v0, CallRange* v1) {
      if (v0->range.to <= v1->range.from) {
        return -1;
      }
      if (v0->range.from >= v1->range.to) {
        return 1;
      }
      return 0;
    }
  };

  // Ranges where all registers must be spilled due to call instructions.
  using CallRangeList = InlineList<CallRange>;
  CallRangeList callRangesList;
  SplayTree<CallRange*, CallRange> callRanges;

  // Information about an allocated stack slot.
  struct SpillSlot : public TempObject,
                     public InlineForwardListNode<SpillSlot> {
    LStackSlot alloc;
    LiveRangeSet allocated;

    SpillSlot(uint32_t slot, LifoAlloc* alloc)
        : alloc(slot), allocated(alloc) {}
  };
  using SpillSlotList = InlineForwardList<SpillSlot>;

  // All allocated slots of each width.
  SpillSlotList normalSlots, doubleSlots, quadSlots;

  Vector<LiveBundle*, 4, SystemAllocPolicy> spilledBundles;

 public:
  BacktrackingAllocator(MIRGenerator* mir, LIRGenerator* lir, LIRGraph& graph,
                        bool testbed)
      : RegisterAllocator(mir, lir, graph),
        testbed(testbed),
        liveIn(nullptr),
        callRanges(nullptr) {}

  [[nodiscard]] bool go();

  static size_t SpillWeightFromUsePolicy(LUse::Policy policy) {
    switch (policy) {
      case LUse::ANY:
        return 1000;

      case LUse::REGISTER:
      case LUse::FIXED:
        return 2000;

      default:
        return 0;
    }
  }
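
  // Illustrative arithmetic, assuming LiveRange::noteAddedUse() accumulates
  // SpillWeightFromUsePolicy() for each use (see usesSpillWeight_ above): a
  // range with one LUse::ANY use and two LUse::REGISTER uses carries a weight
  // of 1000 + 2000 + 2000 = 5000, so computeSpillWeight() never needs to
  // re-walk the use list.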

 private:
  using LiveRangeVector = Vector<LiveRange*, 4, SystemAllocPolicy>;
  using LiveBundleVector = Vector<LiveBundle*, 4, SystemAllocPolicy>;

  // Liveness methods.
  [[nodiscard]] bool init();
  [[nodiscard]] bool buildLivenessInfo();

  [[nodiscard]] bool addInitialFixedRange(AnyRegister reg, CodePosition from,
                                          CodePosition to);

  VirtualRegister& vreg(const LDefinition* def) {
    return vregs[def->virtualRegister()];
  }
  VirtualRegister& vreg(const LAllocation* alloc) {
    MOZ_ASSERT(alloc->isUse());
    return vregs[alloc->toUse()->virtualRegister()];
  }

  // Allocation methods.
  [[nodiscard]] bool tryMergeBundles(LiveBundle* bundle0, LiveBundle* bundle1);
  [[nodiscard]] bool tryMergeReusedRegister(VirtualRegister& def,
                                            VirtualRegister& input);
  void allocateStackDefinition(VirtualRegister& reg);
  [[nodiscard]] bool mergeAndQueueRegisters();
  [[nodiscard]] bool tryAllocateFixed(LiveBundle* bundle,
                                      Requirement requirement, bool* success,
                                      bool* pfixed,
                                      LiveBundleVector& conflicting);
  [[nodiscard]] bool tryAllocateNonFixed(LiveBundle* bundle,
                                         Requirement requirement,
                                         Requirement hint, bool* success,
                                         bool* pfixed,
                                         LiveBundleVector& conflicting);
  [[nodiscard]] bool processBundle(MIRGenerator* mir, LiveBundle* bundle);
  [[nodiscard]] bool computeRequirement(LiveBundle* bundle,
                                        Requirement* prequirement,
                                        Requirement* phint);
  [[nodiscard]] bool tryAllocateRegister(PhysicalRegister& r,
                                         LiveBundle* bundle, bool* success,
                                         bool* pfixed,
                                         LiveBundleVector& conflicting);
  [[nodiscard]] bool evictBundle(LiveBundle* bundle);
  [[nodiscard]] bool splitAndRequeueBundles(LiveBundle* bundle,
                                            const LiveBundleVector& newBundles);
  [[nodiscard]] bool spill(LiveBundle* bundle);
  [[nodiscard]] AVOID_INLINE_FOR_DEBUGGING bool
  tryAllocatingRegistersForSpillBundles();

  bool isReusedInput(LUse* use, LNode* ins, bool considerCopy);
  bool isRegisterUse(UsePosition* use, LNode* ins, bool considerCopy = false);
  bool isRegisterDefinition(LiveRange* range);
  [[nodiscard]] bool pickStackSlot(SpillSet* spill);
  [[nodiscard]] bool insertAllRanges(LiveRangeSet& set, LiveBundle* bundle);

  // Reification methods.
  [[nodiscard]] AVOID_INLINE_FOR_DEBUGGING bool pickStackSlots();
  [[nodiscard]] AVOID_INLINE_FOR_DEBUGGING bool resolveControlFlow();
  [[nodiscard]] AVOID_INLINE_FOR_DEBUGGING bool reifyAllocations();
  [[nodiscard]] AVOID_INLINE_FOR_DEBUGGING bool populateSafepoints();
  [[nodiscard]] AVOID_INLINE_FOR_DEBUGGING bool annotateMoveGroups();
  [[nodiscard]] AVOID_INLINE_FOR_DEBUGGING bool deadRange(LiveRange* range);
  size_t findFirstNonCallSafepoint(CodePosition from);
  size_t findFirstSafepoint(CodePosition pos, size_t startFrom);
  void addLiveRegistersForRange(VirtualRegister& reg, LiveRange* range);

  [[nodiscard]] bool addMove(LMoveGroup* moves, LiveRange* from, LiveRange* to,
                             LDefinition::Type type) {
    LAllocation fromAlloc = from->bundle()->allocation();
    LAllocation toAlloc = to->bundle()->allocation();
    MOZ_ASSERT(fromAlloc != toAlloc);
    return moves->add(fromAlloc, toAlloc, type);
  }

  [[nodiscard]] bool moveInput(LInstruction* ins, LiveRange* from,
                               LiveRange* to, LDefinition::Type type) {
    if (from->bundle()->allocation() == to->bundle()->allocation()) {
      return true;
    }
    LMoveGroup* moves = getInputMoveGroup(ins);
    return addMove(moves, from, to, type);
  }

  [[nodiscard]] bool moveAfter(LInstruction* ins, LiveRange* from,
                               LiveRange* to, LDefinition::Type type) {
    if (from->bundle()->allocation() == to->bundle()->allocation()) {
      return true;
    }
    LMoveGroup* moves = getMoveGroupAfter(ins);
    return addMove(moves, from, to, type);
  }

  [[nodiscard]] bool moveAtExit(LBlock* block, LiveRange* from, LiveRange* to,
                                LDefinition::Type type) {
    if (from->bundle()->allocation() == to->bundle()->allocation()) {
      return true;
    }
    LMoveGroup* moves = block->getExitMoveGroup(alloc());
    return addMove(moves, from, to, type);
  }

  [[nodiscard]] bool moveAtEntry(LBlock* block, LiveRange* from, LiveRange* to,
                                 LDefinition::Type type) {
    if (from->bundle()->allocation() == to->bundle()->allocation()) {
      return true;
    }
    LMoveGroup* moves = block->getEntryMoveGroup(alloc());
    return addMove(moves, from, to, type);
  }

  [[nodiscard]] bool moveAtEdge(LBlock* predecessor, LBlock* successor,
                                LiveRange* from, LiveRange* to,
                                LDefinition::Type type);

  // Debugging methods.
  void dumpAllocations();

  struct PrintLiveRange;

  bool minimalDef(LiveRange* range, LNode* ins);
  bool minimalUse(LiveRange* range, UsePosition* use);
  bool minimalBundle(LiveBundle* bundle, bool* pfixed = nullptr);

  // Heuristic methods.

  size_t computePriority(LiveBundle* bundle);
  size_t computeSpillWeight(LiveBundle* bundle);

  size_t maximumSpillWeight(const LiveBundleVector& bundles);

  [[nodiscard]] bool chooseBundleSplit(LiveBundle* bundle, bool fixed,
                                       LiveBundle* conflict);

  [[nodiscard]] bool splitAt(LiveBundle* bundle,
                             const SplitPositionVector& splitPositions);
  [[nodiscard]] bool trySplitAcrossHotcode(LiveBundle* bundle, bool* success);
  [[nodiscard]] bool trySplitAfterLastRegisterUse(LiveBundle* bundle,
                                                  LiveBundle* conflict,
                                                  bool* success);
  [[nodiscard]] bool trySplitBeforeFirstRegisterUse(LiveBundle* bundle,
                                                    LiveBundle* conflict,
                                                    bool* success);
  [[nodiscard]] bool splitAcrossCalls(LiveBundle* bundle);

  bool compilingWasm() { return mir->outerInfo().compilingWasm(); }

  void dumpVregs(const char* who);
};

}  // namespace jit
}  // namespace js

#endif /* jit_BacktrackingAllocator_h */