/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
 * vim: set ts=8 sts=2 et sw=2 tw=80:
 * This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this
 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */

#ifndef jit_LIR_h
#define jit_LIR_h

// This file declares the core data structures for LIR: storage allocations for
// inputs and outputs, as well as the interface instructions must conform to.

#include "mozilla/Array.h"
#include "mozilla/Casting.h"

#include "jit/Bailouts.h"
#include "jit/FixedList.h"
#include "jit/InlineList.h"
#include "jit/JitAllocPolicy.h"
#include "jit/LIROpsGenerated.h"
#include "jit/MIR.h"
#include "jit/MIRGraph.h"
#include "jit/Registers.h"
#include "jit/Safepoints.h"
#include "util/Memory.h"

namespace js {
namespace jit {

class LUse;
class LGeneralReg;
class LFloatReg;
class LStackSlot;
class LStackArea;
class LArgument;
class LConstantIndex;
class LInstruction;
class LDefinition;
class MBasicBlock;
class MIRGenerator;

static const uint32_t VREG_INCREMENT = 1;

static const uint32_t THIS_FRAME_ARGSLOT = 0;

#if defined(JS_NUNBOX32)
#  define BOX_PIECES 2
static const uint32_t VREG_TYPE_OFFSET = 0;
static const uint32_t VREG_DATA_OFFSET = 1;
static const uint32_t TYPE_INDEX = 0;
static const uint32_t PAYLOAD_INDEX = 1;
static const uint32_t INT64LOW_INDEX = 0;
static const uint32_t INT64HIGH_INDEX = 1;
#elif defined(JS_PUNBOX64)
#  define BOX_PIECES 1
#else
#  error "Unknown!"
#endif

static const uint32_t INT64_PIECES = sizeof(int64_t) / sizeof(uintptr_t);
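// On 32-bit targets INT64_PIECES is 2 and an int64 spans two pointer-sized
// pieces (indexed by INT64LOW_INDEX/INT64HIGH_INDEX above); on 64-bit targets
// it is 1.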

// Represents storage for an operand. For constants, the kind tag is stored in
// the low bits of the pointer, and the untagged pointer is a pointer to an
// MConstant.
class LAllocation : public TempObject {
  uintptr_t bits_;

  // 3 bits gives us enough for an interesting set of Kinds and also fits
  // within the alignment bits of pointers to Value, which are always
  // 8-byte aligned.
  static const uintptr_t KIND_BITS = 3;
  static const uintptr_t KIND_SHIFT = 0;
  static const uintptr_t KIND_MASK = (1 << KIND_BITS) - 1;

 protected:
#ifdef JS_64BIT
  static const uintptr_t DATA_BITS = sizeof(uint32_t) * 8;
#else
  static const uintptr_t DATA_BITS = (sizeof(uint32_t) * 8) - KIND_BITS;
#endif
  static const uintptr_t DATA_SHIFT = KIND_SHIFT + KIND_BITS;

 public:
  enum Kind {
    CONSTANT_VALUE,  // MConstant*.
    CONSTANT_INDEX,  // Constant arbitrary index.
    USE,         // Use of a virtual register, with physical allocation policy.
    GPR,         // General purpose register.
    FPU,         // Floating-point register.
    STACK_SLOT,  // Stack slot.
    STACK_AREA,  // Stack area.
    ARGUMENT_SLOT  // Argument slot.
  };

  static const uintptr_t DATA_MASK = (uintptr_t(1) << DATA_BITS) - 1;

 protected:
  uint32_t data() const {
    MOZ_ASSERT(!hasIns());
    return mozilla::AssertedCast<uint32_t>(bits_ >> DATA_SHIFT);
  }
  void setData(uintptr_t data) {
    MOZ_ASSERT(!hasIns());
    MOZ_ASSERT(data <= DATA_MASK);
    bits_ &= ~(DATA_MASK << DATA_SHIFT);
    bits_ |= (data << DATA_SHIFT);
  }
  void setKindAndData(Kind kind, uintptr_t data) {
    MOZ_ASSERT(data <= DATA_MASK);
    bits_ = (uintptr_t(kind) << KIND_SHIFT) | data << DATA_SHIFT;
    MOZ_ASSERT(!hasIns());
  }

  bool hasIns() const { return isStackArea(); }
  const LInstruction* ins() const {
    MOZ_ASSERT(hasIns());
    return reinterpret_cast<const LInstruction*>(bits_ &
                                                 ~(KIND_MASK << KIND_SHIFT));
  }
  LInstruction* ins() {
    MOZ_ASSERT(hasIns());
    return reinterpret_cast<LInstruction*>(bits_ & ~(KIND_MASK << KIND_SHIFT));
  }
  void setKindAndIns(Kind kind, LInstruction* ins) {
    uintptr_t data = reinterpret_cast<uintptr_t>(ins);
    MOZ_ASSERT((data & (KIND_MASK << KIND_SHIFT)) == 0);
    bits_ = data | (uintptr_t(kind) << KIND_SHIFT);
    MOZ_ASSERT(hasIns());
  }

  LAllocation(Kind kind, uintptr_t data) { setKindAndData(kind, data); }
  LAllocation(Kind kind, LInstruction* ins) { setKindAndIns(kind, ins); }
  explicit LAllocation(Kind kind) { setKindAndData(kind, 0); }

 public:
  LAllocation() : bits_(0) { MOZ_ASSERT(isBogus()); }

  // The MConstant pointer must have its low bits cleared.
  explicit LAllocation(const MConstant* c) {
    MOZ_ASSERT(c);
    bits_ = uintptr_t(c);
    MOZ_ASSERT((bits_ & (KIND_MASK << KIND_SHIFT)) == 0);
    bits_ |= CONSTANT_VALUE << KIND_SHIFT;
  }
  inline explicit LAllocation(AnyRegister reg);

  Kind kind() const { return (Kind)((bits_ >> KIND_SHIFT) & KIND_MASK); }

  bool isBogus() const { return bits_ == 0; }
  bool isUse() const { return kind() == USE; }
  bool isConstant() const { return isConstantValue() || isConstantIndex(); }
  bool isConstantValue() const { return kind() == CONSTANT_VALUE; }
  bool isConstantIndex() const { return kind() == CONSTANT_INDEX; }
  bool isGeneralReg() const { return kind() == GPR; }
  bool isFloatReg() const { return kind() == FPU; }
  bool isStackSlot() const { return kind() == STACK_SLOT; }
  bool isStackArea() const { return kind() == STACK_AREA; }
  bool isArgument() const { return kind() == ARGUMENT_SLOT; }
  bool isRegister() const { return isGeneralReg() || isFloatReg(); }
  bool isRegister(bool needFloat) const {
    return needFloat ? isFloatReg() : isGeneralReg();
  }
  bool isMemory() const { return isStackSlot() || isArgument(); }
  inline uint32_t memorySlot() const;
  inline LUse* toUse();
  inline const LUse* toUse() const;
  inline const LGeneralReg* toGeneralReg() const;
  inline const LFloatReg* toFloatReg() const;
  inline const LStackSlot* toStackSlot() const;
  inline LStackArea* toStackArea();
  inline const LStackArea* toStackArea() const;
  inline const LArgument* toArgument() const;
  inline const LConstantIndex* toConstantIndex() const;
  inline AnyRegister toRegister() const;

  const MConstant* toConstant() const {
    MOZ_ASSERT(isConstantValue());
    return reinterpret_cast<const MConstant*>(bits_ &
                                              ~(KIND_MASK << KIND_SHIFT));
  }

  bool operator==(const LAllocation& other) const {
    return bits_ == other.bits_;
  }

  bool operator!=(const LAllocation& other) const {
    return bits_ != other.bits_;
  }

  HashNumber hash() const { return bits_; }

  bool aliases(const LAllocation& other) const;

#ifdef JS_JITSPEW
  UniqueChars toString() const;
  void dump() const;
#endif
};
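
// Illustrative sketch (not part of the API): for non-constant allocations the
// payload lives above the kind tag, so e.g. LAllocation(STACK_SLOT, 16) packs
// as (16 << DATA_SHIFT) | (STACK_SLOT << KIND_SHIFT), and kind()/data()
// recover the two halves. For CONSTANT_VALUE the whole word is the (aligned)
// MConstant pointer.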

class LUse : public LAllocation {
  static const uint32_t POLICY_BITS = 3;
  static const uint32_t POLICY_SHIFT = 0;
  static const uint32_t POLICY_MASK = (1 << POLICY_BITS) - 1;
#ifdef JS_CODEGEN_ARM64
  static const uint32_t REG_BITS = 7;
#else
  static const uint32_t REG_BITS = 6;
#endif
  static const uint32_t REG_SHIFT = POLICY_SHIFT + POLICY_BITS;
  static const uint32_t REG_MASK = (1 << REG_BITS) - 1;

  // Whether the physical register for this operand may be reused for a def.
  static const uint32_t USED_AT_START_BITS = 1;
  static const uint32_t USED_AT_START_SHIFT = REG_SHIFT + REG_BITS;
  static const uint32_t USED_AT_START_MASK = (1 << USED_AT_START_BITS) - 1;

  // The REG field will hold the register code for any Register or
  // FloatRegister, though not for an AnyRegister.
  static_assert(std::max(Registers::Total, FloatRegisters::Total) <=
                    REG_MASK + 1,
                "The field must be able to represent any register code");

 public:
  // Virtual registers get the remaining bits.
  static const uint32_t VREG_BITS =
      DATA_BITS - (USED_AT_START_SHIFT + USED_AT_START_BITS);
  static const uint32_t VREG_SHIFT = USED_AT_START_SHIFT + USED_AT_START_BITS;
  static const uint32_t VREG_MASK = (1 << VREG_BITS) - 1;

  enum Policy {
    // Input should be in a read-only register or stack slot.
    ANY,

    // Input must be in a read-only register.
    REGISTER,

    // Input must be in a specific, read-only register.
    FIXED,

    // Keep the used virtual register alive, and use whatever allocation is
    // available. This is similar to ANY but hints to the register allocator
    // that it is never useful to optimize this site.
    KEEPALIVE,

    // Input must be allocated on the stack.  Only used when extracting stack
    // results from stack result areas.
    STACK,

    // For snapshot inputs, indicates that the associated instruction will
    // write this input to its output register before bailing out.
    // The register allocator may thus allocate that output register, and
    // does not need to keep the virtual register alive (alternatively,
    // this may be treated as KEEPALIVE).
    RECOVERED_INPUT
  };

  void set(Policy policy, uint32_t reg, bool usedAtStart) {
    MOZ_ASSERT(reg <= REG_MASK, "Register code must fit in field");
    setKindAndData(USE, (policy << POLICY_SHIFT) | (reg << REG_SHIFT) |
                            ((usedAtStart ? 1 : 0) << USED_AT_START_SHIFT));
  }

 public:
  LUse(uint32_t vreg, Policy policy, bool usedAtStart = false) {
    set(policy, 0, usedAtStart);
    setVirtualRegister(vreg);
  }
  explicit LUse(Policy policy, bool usedAtStart = false) {
    set(policy, 0, usedAtStart);
  }
  explicit LUse(Register reg, bool usedAtStart = false) {
    set(FIXED, reg.code(), usedAtStart);
  }
  explicit LUse(FloatRegister reg, bool usedAtStart = false) {
    set(FIXED, reg.code(), usedAtStart);
  }
  LUse(Register reg, uint32_t virtualRegister, bool usedAtStart = false) {
    set(FIXED, reg.code(), usedAtStart);
    setVirtualRegister(virtualRegister);
  }
  LUse(FloatRegister reg, uint32_t virtualRegister, bool usedAtStart = false) {
    set(FIXED, reg.code(), usedAtStart);
    setVirtualRegister(virtualRegister);
  }

  void setVirtualRegister(uint32_t index) {
    MOZ_ASSERT(index < VREG_MASK);

    uint32_t old = data() & ~(VREG_MASK << VREG_SHIFT);
    setData(old | (index << VREG_SHIFT));
  }

  Policy policy() const {
    Policy policy = (Policy)((data() >> POLICY_SHIFT) & POLICY_MASK);
    return policy;
  }
  uint32_t virtualRegister() const {
    uint32_t index = (data() >> VREG_SHIFT) & VREG_MASK;
    MOZ_ASSERT(index != 0);
    return index;
  }
  uint32_t registerCode() const {
    MOZ_ASSERT(policy() == FIXED);
    return (data() >> REG_SHIFT) & REG_MASK;
  }
  bool isFixedRegister() const { return policy() == FIXED; }
  bool usedAtStart() const {
    return !!((data() >> USED_AT_START_SHIFT) & USED_AT_START_MASK);
  }
};
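
// Illustrative examples (sketch only), where |vreg| is a virtual register
// number and |reg| is some physical Register:
//
//   LUse any(vreg, LUse::ANY);               // register or stack slot
//   LUse inReg(vreg, LUse::REGISTER, true);  // any register, used at start
//   LUse fixed(reg, vreg);                   // FIXED: pinned to |reg|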

static const uint32_t MAX_VIRTUAL_REGISTERS = LUse::VREG_MASK;

class LBoxAllocation {
#ifdef JS_NUNBOX32
  LAllocation type_;
  LAllocation payload_;
#else
  LAllocation value_;
#endif

 public:
#ifdef JS_NUNBOX32
  LBoxAllocation(LAllocation type, LAllocation payload)
      : type_(type), payload_(payload) {}

  LAllocation type() const { return type_; }
  LAllocation payload() const { return payload_; }
#else
  explicit LBoxAllocation(LAllocation value) : value_(value) {}

  LAllocation value() const { return value_; }
#endif
};

template <class ValT>
class LInt64Value {
#if JS_BITS_PER_WORD == 32
  ValT high_;
  ValT low_;
#else
  ValT value_;
#endif

 public:
  LInt64Value() = default;

#if JS_BITS_PER_WORD == 32
  LInt64Value(ValT high, ValT low) : high_(high), low_(low) {}

  ValT high() const { return high_; }
  ValT low() const { return low_; }

  const ValT* pointerHigh() const { return &high_; }
  const ValT* pointerLow() const { return &low_; }
#else
  explicit LInt64Value(ValT value) : value_(value) {}

  ValT value() const { return value_; }
  const ValT* pointer() const { return &value_; }
#endif
};

using LInt64Allocation = LInt64Value<LAllocation>;
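
// Platform split in practice (sketch): on 32-bit targets an LInt64Allocation
// is built from two LAllocations, LInt64Allocation(high, low); on 64-bit
// targets from a single one, LInt64Allocation(value). LBoxAllocation divides
// the same way for boxed Values (type/payload pieces vs. one value).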

class LGeneralReg : public LAllocation {
 public:
  explicit LGeneralReg(Register reg) : LAllocation(GPR, reg.code()) {}

  Register reg() const { return Register::FromCode(data()); }
};

class LFloatReg : public LAllocation {
 public:
  explicit LFloatReg(FloatRegister reg) : LAllocation(FPU, reg.code()) {}

  FloatRegister reg() const { return FloatRegister::FromCode(data()); }
};

// Arbitrary constant index.
class LConstantIndex : public LAllocation {
  explicit LConstantIndex(uint32_t index)
      : LAllocation(CONSTANT_INDEX, index) {}

 public:
  static LConstantIndex FromIndex(uint32_t index) {
    return LConstantIndex(index);
  }

  uint32_t index() const { return data(); }
};

// Stack slots are indices into the stack. The indices are byte indices.
class LStackSlot : public LAllocation {
 public:
  explicit LStackSlot(uint32_t slot) : LAllocation(STACK_SLOT, slot) {}

  uint32_t slot() const { return data(); }
};

// Stack area indicates a contiguous stack allocation meant to receive call
// results that don't fit in registers.
class LStackArea : public LAllocation {
 public:
  explicit LStackArea(LInstruction* stackArea)
      : LAllocation(STACK_AREA, stackArea) {}

  // Byte index of base of stack area, in the same coordinate space as
  // LStackSlot::slot().
  inline uint32_t base() const;
  inline void setBase(uint32_t base);

  // Size in bytes of the stack area.
  inline uint32_t size() const;
  inline uint32_t alignment() const { return 8; }

  class ResultIterator {
    const LStackArea& alloc_;
    uint32_t idx_;

   public:
    explicit ResultIterator(const LStackArea& alloc) : alloc_(alloc), idx_(0) {}

    inline bool done() const;
    inline void next();
    inline LAllocation alloc() const;
    inline bool isGcPointer() const;

    explicit operator bool() const { return !done(); }
  };

  ResultIterator results() const { return ResultIterator(*this); }

  inline LStackSlot resultAlloc(LInstruction* lir, LDefinition* def) const;
};

// Arguments are reverse indices into the stack. The indices are byte indices.
class LArgument : public LAllocation {
 public:
  explicit LArgument(uint32_t index) : LAllocation(ARGUMENT_SLOT, index) {}

  uint32_t index() const { return data(); }
};

inline uint32_t LAllocation::memorySlot() const {
  MOZ_ASSERT(isMemory());
  return isStackSlot() ? toStackSlot()->slot() : toArgument()->index();
}

// Represents storage for a definition.
class LDefinition {
  // Bits containing policy, type, and virtual register.
  uint32_t bits_;

  // Before register allocation, this optionally contains a fixed policy.
  // Register allocation assigns this field to a physical policy if none is
  // fixed.
  //
  // Right now, pre-allocated outputs are limited to the following:
  //   * Physical argument stack slots.
  //   * Physical registers.
  LAllocation output_;

  static const uint32_t TYPE_BITS = 4;
  static const uint32_t TYPE_SHIFT = 0;
  static const uint32_t TYPE_MASK = (1 << TYPE_BITS) - 1;
  static const uint32_t POLICY_BITS = 2;
  static const uint32_t POLICY_SHIFT = TYPE_SHIFT + TYPE_BITS;
  static const uint32_t POLICY_MASK = (1 << POLICY_BITS) - 1;

  static const uint32_t VREG_BITS =
      (sizeof(uint32_t) * 8) - (POLICY_BITS + TYPE_BITS);
  static const uint32_t VREG_SHIFT = POLICY_SHIFT + POLICY_BITS;
  static const uint32_t VREG_MASK = (1 << VREG_BITS) - 1;

 public:
  // Note that definitions, by default, are always allocated a register,
  // unless the policy specifies that an input can be re-used and that input
  // is a stack slot.
  enum Policy {
    // The policy is predetermined by the LAllocation attached to this
    // definition. The allocation may be:
    //   * A register, which may not appear as any fixed temporary.
    //   * A stack slot or argument.
    //
    // Register allocation will not modify a fixed allocation.
    FIXED,

    // A random register of an appropriate class will be assigned.
    REGISTER,

    // An area on the stack must be assigned.  Used when defining stack results
    // and stack result areas.
    STACK,

    // One definition per instruction must re-use the first input
    // allocation, which (for now) must be a register.
    MUST_REUSE_INPUT
  };

  enum Type {
    GENERAL,  // Generic, integer or pointer-width data (GPR).
    INT32,    // int32 data (GPR).
    OBJECT,   // Pointer that may be collected as garbage (GPR).
    SLOTS,    // Slots/elements pointer that may be moved by minor GCs (GPR).
    FLOAT32,  // 32-bit floating-point value (FPU).
    DOUBLE,   // 64-bit floating-point value (FPU).
    SIMD128,  // 128-bit SIMD vector (FPU).
    STACKRESULTS,  // A variable-size stack allocation that may contain objects.
#ifdef JS_NUNBOX32
    // A type virtual register must be followed by a payload virtual
    // register, as both will be tracked as a single gcthing.
    TYPE,
    PAYLOAD
#else
    BOX  // Joined box, for punbox systems. (GPR, gcthing)
#endif
  };

  void set(uint32_t index, Type type, Policy policy) {
    static_assert(MAX_VIRTUAL_REGISTERS <= VREG_MASK);
    bits_ =
        (index << VREG_SHIFT) | (policy << POLICY_SHIFT) | (type << TYPE_SHIFT);
#ifndef ENABLE_WASM_SIMD
    MOZ_ASSERT(this->type() != SIMD128);
#endif
  }

 public:
  LDefinition(uint32_t index, Type type, Policy policy = REGISTER) {
    set(index, type, policy);
  }

  explicit LDefinition(Type type, Policy policy = REGISTER) {
    set(0, type, policy);
  }

  LDefinition(Type type, const LAllocation& a) : output_(a) {
    set(0, type, FIXED);
  }

  LDefinition(uint32_t index, Type type, const LAllocation& a) : output_(a) {
    set(index, type, FIXED);
  }

  LDefinition() : bits_(0) { MOZ_ASSERT(isBogusTemp()); }

  static LDefinition BogusTemp() { return LDefinition(); }

  Policy policy() const {
    return (Policy)((bits_ >> POLICY_SHIFT) & POLICY_MASK);
  }
  Type type() const { return (Type)((bits_ >> TYPE_SHIFT) & TYPE_MASK); }

  static bool isFloatRegCompatible(Type type, FloatRegister reg) {
    if (type == FLOAT32) {
      return reg.isSingle();
    }
    if (type == DOUBLE) {
      return reg.isDouble();
    }
    MOZ_ASSERT(type == SIMD128);
    return reg.isSimd128();
  }

  bool isCompatibleReg(const AnyRegister& r) const {
    if (isFloatReg() && r.isFloat()) {
      return isFloatRegCompatible(type(), r.fpu());
    }
    return !isFloatReg() && !r.isFloat();
  }
  bool isCompatibleDef(const LDefinition& other) const {
#if defined(JS_CODEGEN_ARM) || defined(JS_CODEGEN_MIPS32)
    if (isFloatReg() && other.isFloatReg()) {
      return type() == other.type();
    }
    return !isFloatReg() && !other.isFloatReg();
#else
    return isFloatReg() == other.isFloatReg();
#endif
  }

  static bool isFloatReg(Type type) {
    return type == FLOAT32 || type == DOUBLE || type == SIMD128;
  }
  bool isFloatReg() const { return isFloatReg(type()); }

  uint32_t virtualRegister() const {
    uint32_t index = (bits_ >> VREG_SHIFT) & VREG_MASK;
    // MOZ_ASSERT(index != 0);
    return index;
  }
  LAllocation* output() { return &output_; }
  const LAllocation* output() const { return &output_; }
  bool isFixed() const { return policy() == FIXED; }
  bool isBogusTemp() const { return isFixed() && output()->isBogus(); }
  void setVirtualRegister(uint32_t index) {
    MOZ_ASSERT(index < VREG_MASK);
    bits_ &= ~(VREG_MASK << VREG_SHIFT);
    bits_ |= index << VREG_SHIFT;
  }
  void setOutput(const LAllocation& a) {
    output_ = a;
    if (!a.isUse()) {
      bits_ &= ~(POLICY_MASK << POLICY_SHIFT);
      bits_ |= FIXED << POLICY_SHIFT;
    }
  }
  void setReusedInput(uint32_t operand) {
    output_ = LConstantIndex::FromIndex(operand);
  }
  uint32_t getReusedInput() const {
    MOZ_ASSERT(policy() == LDefinition::MUST_REUSE_INPUT);
    return output_.toConstantIndex()->index();
  }

  static inline Type TypeFrom(MIRType type) {
    switch (type) {
      case MIRType::Boolean:
      case MIRType::Int32:
        // The stack slot allocator doesn't currently support allocating
        // 1-byte slots, so for now we lower MIRType::Boolean into INT32.
        static_assert(sizeof(bool) <= sizeof(int32_t),
                      "bool doesn't fit in an int32 slot");
        return LDefinition::INT32;
      case MIRType::String:
      case MIRType::Symbol:
      case MIRType::BigInt:
      case MIRType::Object:
      case MIRType::RefOrNull:
        return LDefinition::OBJECT;
      case MIRType::Double:
        return LDefinition::DOUBLE;
      case MIRType::Float32:
        return LDefinition::FLOAT32;
#if defined(JS_PUNBOX64)
      case MIRType::Value:
        return LDefinition::BOX;
#endif
      case MIRType::Slots:
      case MIRType::Elements:
        return LDefinition::SLOTS;
      case MIRType::Pointer:
      case MIRType::IntPtr:
        return LDefinition::GENERAL;
#if defined(JS_PUNBOX64)
      case MIRType::Int64:
        return LDefinition::GENERAL;
#endif
      case MIRType::StackResults:
        return LDefinition::STACKRESULTS;
      case MIRType::Simd128:
        return LDefinition::SIMD128;
      default:
        MOZ_CRASH("unexpected type");
    }
  }

  UniqueChars toString() const;

#ifdef JS_JITSPEW
  void dump() const;
#endif
};
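
// Illustrative sketch (|v| and |reg| are hypothetical): lowering typically
// derives a definition's type from the MIR type, e.g.
//   LDefinition(v, LDefinition::TypeFrom(MIRType::Int32))     // REGISTER
// while a fixed output pins the definition to a given allocation:
//   LDefinition(v, LDefinition::INT32, LGeneralReg(reg))      // FIXED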

class LInt64Definition : public LInt64Value<LDefinition> {
 public:
  using LInt64Value<LDefinition>::LInt64Value;

  static LInt64Definition BogusTemp() { return LInt64Definition(); }

  bool isBogusTemp() const {
#if JS_BITS_PER_WORD == 32
    MOZ_ASSERT(high().isBogusTemp() == low().isBogusTemp());
    return high().isBogusTemp();
#else
    return value().isBogusTemp();
#endif
  }
};

// Forward declarations of LIR types.
#define LIROP(op) class L##op;
LIR_OPCODE_LIST(LIROP)
#undef LIROP

class LSnapshot;
class LSafepoint;
class LElementVisitor;

constexpr size_t MaxNumLInstructionOperands = 63;

// The common base class for LPhi and LInstruction.
class LNode {
 protected:
  MDefinition* mir_;

 private:
  LBlock* block_;
  uint32_t id_;

 protected:
  // Bitfields below are all uint32_t to make sure MSVC packs them correctly.
  uint32_t op_ : 10;
  uint32_t isCall_ : 1;

  // LPhi::numOperands() may not fit in this bitfield, so we only use this
  // field for LInstruction.
  uint32_t nonPhiNumOperands_ : 6;
  static_assert((1 << 6) - 1 == MaxNumLInstructionOperands,
                "packing constraints");

  // For LInstruction, the first operand is stored at offset
  // sizeof(LInstruction) + nonPhiOperandsOffset_ * sizeof(uintptr_t).
  uint32_t nonPhiOperandsOffset_ : 5;
  uint32_t numDefs_ : 4;
  uint32_t numTemps_ : 4;

 public:
  enum class Opcode {
#define LIROP(name) name,
    LIR_OPCODE_LIST(LIROP)
#undef LIROP
        Invalid
  };

  LNode(Opcode op, uint32_t nonPhiNumOperands, uint32_t numDefs,
        uint32_t numTemps)
      : mir_(nullptr),
        block_(nullptr),
        id_(0),
        op_(uint32_t(op)),
        isCall_(false),
        nonPhiNumOperands_(nonPhiNumOperands),
        nonPhiOperandsOffset_(0),
        numDefs_(numDefs),
        numTemps_(numTemps) {
    MOZ_ASSERT(op < Opcode::Invalid);
    MOZ_ASSERT(op_ == uint32_t(op), "opcode must fit in bitfield");
    MOZ_ASSERT(nonPhiNumOperands_ == nonPhiNumOperands,
               "nonPhiNumOperands must fit in bitfield");
    MOZ_ASSERT(numDefs_ == numDefs, "numDefs must fit in bitfield");
    MOZ_ASSERT(numTemps_ == numTemps, "numTemps must fit in bitfield");
  }

  const char* opName() {
    switch (op()) {
#define LIR_NAME_INS(name) \
  case Opcode::name:       \
    return #name;
      LIR_OPCODE_LIST(LIR_NAME_INS)
#undef LIR_NAME_INS
      default:
        MOZ_CRASH("Invalid op");
    }
  }

  // Hook for opcodes to add extra high level detail about what code will be
  // emitted for the op.
 private:
  const char* extraName() const { return nullptr; }

 public:
#ifdef JS_JITSPEW
  const char* getExtraName() const;
#endif

  Opcode op() const { return Opcode(op_); }

  bool isInstruction() const { return op() != Opcode::Phi; }
  inline LInstruction* toInstruction();
  inline const LInstruction* toInstruction() const;

  // Returns the number of outputs of this instruction. If an output is
  // unallocated, it is an LDefinition, defining a virtual register.
  size_t numDefs() const { return numDefs_; }

  bool isCall() const { return isCall_; }

  // Does this call preserve the given register?
  // By default, it is assumed that all registers are clobbered by a call.
  inline bool isCallPreserved(AnyRegister reg) const;

  uint32_t id() const { return id_; }
  void setId(uint32_t id) {
    MOZ_ASSERT(!id_);
    MOZ_ASSERT(id);
    id_ = id;
  }
  void setMir(MDefinition* mir) { mir_ = mir; }
  MDefinition* mirRaw() const {
    /* Untyped MIR for this op. Prefer mir() methods in subclasses. */
    return mir_;
  }
  LBlock* block() const { return block_; }
  void setBlock(LBlock* block) { block_ = block; }

  // For an instruction which has a MUST_REUSE_INPUT output, whether that
  // output register will be restored to its original value when bailing out.
  inline bool recoversInput() const;

#ifdef JS_JITSPEW
  void dump(GenericPrinter& out);
  void dump();
  static void printName(GenericPrinter& out, Opcode op);
  void printName(GenericPrinter& out);
  void printOperands(GenericPrinter& out);
#endif

 public:
  // Opcode testing and casts.
#define LIROP(name)                                      \
  bool is##name() const { return op() == Opcode::name; } \
  inline L##name* to##name();                            \
  inline const L##name* to##name() const;
  LIR_OPCODE_LIST(LIROP)
#undef LIROP

// Note: GenerateOpcodeFiles.py generates LIROpsGenerated.h based on this
// macro.
#define LIR_HEADER(opcode) \
  static constexpr LNode::Opcode classOpcode = LNode::Opcode::opcode;
};
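
// Sketch of the generated testing/casting API above: for an opcode such as
// Goto, the LIROP expansion provides isGoto()/toGoto(), so callers can write:
//   if (node->isGoto()) { LGoto* gotoIns = node->toGoto(); ... }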

class LInstruction : public LNode,
                     public TempObject,
                     public InlineListNode<LInstruction> {
  // This snapshot could be set after a ResumePoint.  It is used to restart
  // from the resume point pc.
  LSnapshot* snapshot_;

  // Structure capturing the set of stack slots and registers which are known
  // to hold either gcthings or Values.
  LSafepoint* safepoint_;

  LMoveGroup* inputMoves_;
  LMoveGroup* fixReuseMoves_;
  LMoveGroup* movesAfter_;

 protected:
  LInstruction(Opcode opcode, uint32_t numOperands, uint32_t numDefs,
               uint32_t numTemps)
      : LNode(opcode, numOperands, numDefs, numTemps),
        snapshot_(nullptr),
        safepoint_(nullptr),
        inputMoves_(nullptr),
        fixReuseMoves_(nullptr),
        movesAfter_(nullptr) {}

  void setIsCall() { isCall_ = true; }

 public:
  inline LDefinition* getDef(size_t index);

  void setDef(size_t index, const LDefinition& def) { *getDef(index) = def; }

  LAllocation* getOperand(size_t index) const {
    MOZ_ASSERT(index < numOperands());
    MOZ_ASSERT(nonPhiOperandsOffset_ > 0);
    uintptr_t p = reinterpret_cast<uintptr_t>(this + 1) +
                  nonPhiOperandsOffset_ * sizeof(uintptr_t);
    return reinterpret_cast<LAllocation*>(p) + index;
  }
  void setOperand(size_t index, const LAllocation& a) {
    *getOperand(index) = a;
  }

  void initOperandsOffset(size_t offset) {
    MOZ_ASSERT(nonPhiOperandsOffset_ == 0);
    MOZ_ASSERT(offset >= sizeof(LInstruction));
    MOZ_ASSERT(((offset - sizeof(LInstruction)) % sizeof(uintptr_t)) == 0);
    offset = (offset - sizeof(LInstruction)) / sizeof(uintptr_t);
    nonPhiOperandsOffset_ = offset;
    MOZ_ASSERT(nonPhiOperandsOffset_ == offset, "offset must fit in bitfield");
  }

  // Returns information about temporary registers needed. Each temporary
  // register is an LDefinition with a fixed or virtual register and
  // either GENERAL, FLOAT32, or DOUBLE type.
  size_t numTemps() const { return numTemps_; }
  inline LDefinition* getTemp(size_t index);

  LSnapshot* snapshot() const { return snapshot_; }
  LSafepoint* safepoint() const { return safepoint_; }
  LMoveGroup* inputMoves() const { return inputMoves_; }
  void setInputMoves(LMoveGroup* moves) { inputMoves_ = moves; }
  LMoveGroup* fixReuseMoves() const { return fixReuseMoves_; }
  void setFixReuseMoves(LMoveGroup* moves) { fixReuseMoves_ = moves; }
  LMoveGroup* movesAfter() const { return movesAfter_; }
  void setMovesAfter(LMoveGroup* moves) { movesAfter_ = moves; }
  uint32_t numOperands() const { return nonPhiNumOperands_; }
  void assignSnapshot(LSnapshot* snapshot);
  void initSafepoint(TempAllocator& alloc);

  class InputIterator;
};
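
// Operand storage sketch: operands are not stored in LInstruction itself but
// in the concrete subclass (see LInstructionHelper below), at a
// uintptr_t-aligned offset past sizeof(LInstruction) recorded via
// initOperandsOffset(). A typical walk over them:
//   for (size_t i = 0; i < ins->numOperands(); i++) {
//     LAllocation* a = ins->getOperand(i);
//     ...
//   }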

LInstruction* LNode::toInstruction() {
  MOZ_ASSERT(isInstruction());
  return static_cast<LInstruction*>(this);
}

const LInstruction* LNode::toInstruction() const {
  MOZ_ASSERT(isInstruction());
  return static_cast<const LInstruction*>(this);
}

class LElementVisitor {
#ifdef TRACK_SNAPSHOTS
  LInstruction* ins_ = nullptr;
#endif

 protected:
#ifdef TRACK_SNAPSHOTS
  LInstruction* instruction() { return ins_; }

  void setElement(LInstruction* ins) { ins_ = ins; }
#else
  void setElement(LInstruction* ins) {}
#endif
};

using LInstructionIterator = InlineList<LInstruction>::iterator;
using LInstructionReverseIterator = InlineList<LInstruction>::reverse_iterator;

class MPhi;

// Phi is a pseudo-instruction that emits no code, and is an annotation for the
// register allocator. Like its equivalent in MIR, phis are collected at the
// top of blocks and are meant to be executed in parallel, choosing the input
// corresponding to the predecessor taken in the control flow graph.
class LPhi final : public LNode {
  LAllocation* const inputs_;
  LDefinition def_;

 public:
  LIR_HEADER(Phi)

  LPhi(MPhi* ins, LAllocation* inputs)
      : LNode(classOpcode,
              /* nonPhiNumOperands = */ 0,
              /* numDefs = */ 1,
              /* numTemps = */ 0),
        inputs_(inputs) {
    setMir(ins);
  }

  LDefinition* getDef(size_t index) {
    MOZ_ASSERT(index == 0);
    return &def_;
  }
  void setDef(size_t index, const LDefinition& def) {
    MOZ_ASSERT(index == 0);
    def_ = def;
  }
  size_t numOperands() const { return mir_->toPhi()->numOperands(); }
  LAllocation* getOperand(size_t index) {
    MOZ_ASSERT(index < numOperands());
    return &inputs_[index];
  }
  void setOperand(size_t index, const LAllocation& a) {
    MOZ_ASSERT(index < numOperands());
    inputs_[index] = a;
  }

  // Phis don't have temps, so calling numTemps/getTemp is pointless.
  size_t numTemps() const = delete;
  LDefinition* getTemp(size_t index) = delete;
};

class LMoveGroup;
class LBlock {
  MBasicBlock* block_;
  FixedList<LPhi> phis_;
  InlineList<LInstruction> instructions_;
  LMoveGroup* entryMoveGroup_;
  LMoveGroup* exitMoveGroup_;
  Label label_;

 public:
  explicit LBlock(MBasicBlock* block);
  [[nodiscard]] bool init(TempAllocator& alloc);

  void add(LInstruction* ins) {
    ins->setBlock(this);
    instructions_.pushBack(ins);
  }
  size_t numPhis() const { return phis_.length(); }
  LPhi* getPhi(size_t index) { return &phis_[index]; }
  const LPhi* getPhi(size_t index) const { return &phis_[index]; }
  MBasicBlock* mir() const { return block_; }
  LInstructionIterator begin() { return instructions_.begin(); }
  LInstructionIterator begin(LInstruction* at) {
    return instructions_.begin(at);
  }
  LInstructionIterator end() { return instructions_.end(); }
  LInstructionReverseIterator rbegin() { return instructions_.rbegin(); }
  LInstructionReverseIterator rbegin(LInstruction* at) {
    return instructions_.rbegin(at);
  }
  LInstructionReverseIterator rend() { return instructions_.rend(); }
  InlineList<LInstruction>& instructions() { return instructions_; }
  void insertAfter(LInstruction* at, LInstruction* ins) {
    instructions_.insertAfter(at, ins);
  }
  void insertBefore(LInstruction* at, LInstruction* ins) {
    instructions_.insertBefore(at, ins);
  }
  const LNode* firstElementWithId() const {
    return !phis_.empty() ? static_cast<const LNode*>(getPhi(0))
                          : firstInstructionWithId();
  }
  uint32_t firstId() const { return firstElementWithId()->id(); }
  uint32_t lastId() const { return lastInstructionWithId()->id(); }
  const LInstruction* firstInstructionWithId() const;
  const LInstruction* lastInstructionWithId() const {
    const LInstruction* last = *instructions_.rbegin();
    MOZ_ASSERT(last->id());
    // The last instruction is a control flow instruction which does not have
    // any output.
    MOZ_ASSERT(last->numDefs() == 0);
    return last;
  }

  // Return the label to branch to when branching to this block.
  Label* label() {
    MOZ_ASSERT(!isTrivial());
    return &label_;
  }

  LMoveGroup* getEntryMoveGroup(TempAllocator& alloc);
  LMoveGroup* getExitMoveGroup(TempAllocator& alloc);

  // Test whether this basic block is empty except for a simple goto and does
  // not form a loop. No code will be emitted for such blocks.
  bool isTrivial() { return begin()->isGoto() && !mir()->isLoopHeader(); }

#ifdef JS_JITSPEW
  void dump(GenericPrinter& out);
  void dump();
#endif
};
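
// Walk sketch for a block's contents (phis first, then instructions), using
// the accessors above:
//   for (size_t i = 0; i < block->numPhis(); i++) {
//     LPhi* phi = block->getPhi(i);
//     ...
//   }
//   for (LInstructionIterator iter = block->begin(); iter != block->end();
//        iter++) {
//     ...
//   }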

namespace details {
template <size_t Defs, size_t Temps>
class LInstructionFixedDefsTempsHelper : public LInstruction {
  mozilla::Array<LDefinition, Defs + Temps> defsAndTemps_;

 protected:
  LInstructionFixedDefsTempsHelper(Opcode opcode, uint32_t numOperands)
      : LInstruction(opcode, numOperands, Defs, Temps) {}

 public:
  // Override the methods in LInstruction with more optimized versions
  // for when we know the exact instruction type.
  LDefinition* getDef(size_t index) {
    MOZ_ASSERT(index < Defs);
    return &defsAndTemps_[index];
  }
  LDefinition* getTemp(size_t index) {
    MOZ_ASSERT(index < Temps);
    return &defsAndTemps_[Defs + index];
  }
  LInt64Definition getInt64Temp(size_t index) {
    MOZ_ASSERT(index + INT64_PIECES <= Temps);
#if JS_BITS_PER_WORD == 32
    return LInt64Definition(defsAndTemps_[Defs + index + INT64HIGH_INDEX],
                            defsAndTemps_[Defs + index + INT64LOW_INDEX]);
#else
    return LInt64Definition(defsAndTemps_[Defs + index]);
#endif
  }

  void setDef(size_t index, const LDefinition& def) {
    MOZ_ASSERT(index < Defs);
    defsAndTemps_[index] = def;
  }
  void setTemp(size_t index, const LDefinition& a) {
    MOZ_ASSERT(index < Temps);
    defsAndTemps_[Defs + index] = a;
  }
  void setInt64Temp(size_t index, const LInt64Definition& a) {
#if JS_BITS_PER_WORD == 32
    setTemp(index, a.low());
    setTemp(index + 1, a.high());
#else
    setTemp(index, a.value());
#endif
  }

  // Default accessors, assuming a single input and output, respectively.
  const LAllocation* input() {
    MOZ_ASSERT(numOperands() == 1);
    return getOperand(0);
  }
  const LDefinition* output() {
    MOZ_ASSERT(numDefs() == 1);
    return getDef(0);
  }
  static size_t offsetOfDef(size_t index) {
    using T = LInstructionFixedDefsTempsHelper<0, 0>;
    return offsetof(T, defsAndTemps_) + index * sizeof(LDefinition);
  }
  static size_t offsetOfTemp(uint32_t numDefs, uint32_t index) {
    using T = LInstructionFixedDefsTempsHelper<0, 0>;
    return offsetof(T, defsAndTemps_) + (numDefs + index) * sizeof(LDefinition);
  }
};
}  // namespace details

inline LDefinition* LInstruction::getDef(size_t index) {
  MOZ_ASSERT(index < numDefs());
  using T = details::LInstructionFixedDefsTempsHelper<0, 0>;
  uint8_t* p = reinterpret_cast<uint8_t*>(this) + T::offsetOfDef(index);
  return reinterpret_cast<LDefinition*>(p);
}

inline LDefinition* LInstruction::getTemp(size_t index) {
  MOZ_ASSERT(index < numTemps());
  using T = details::LInstructionFixedDefsTempsHelper<0, 0>;
  uint8_t* p =
      reinterpret_cast<uint8_t*>(this) + T::offsetOfTemp(numDefs(), index);
  return reinterpret_cast<LDefinition*>(p);
}

template <size_t Defs, size_t Operands, size_t Temps>
class LInstructionHelper
    : public details::LInstructionFixedDefsTempsHelper<Defs, Temps> {
  mozilla::Array<LAllocation, Operands> operands_;

 protected:
  explicit LInstructionHelper(LNode::Opcode opcode)
      : details::LInstructionFixedDefsTempsHelper<Defs, Temps>(opcode,
                                                               Operands) {
    static_assert(
        Operands == 0 || sizeof(operands_) == Operands * sizeof(LAllocation),
        "mozilla::Array should not contain other fields");
    if (Operands > 0) {
      using T = LInstructionHelper<Defs, Operands, Temps>;
      this->initOperandsOffset(offsetof(T, operands_));
    }
  }

 public:
  // Override the methods in LInstruction with more optimized versions
  // for when we know the exact instruction type.
  LAllocation* getOperand(size_t index) { return &operands_[index]; }
  void setOperand(size_t index, const LAllocation& a) { operands_[index] = a; }
  void setBoxOperand(size_t index, const LBoxAllocation& alloc) {
#ifdef JS_NUNBOX32
    operands_[index + TYPE_INDEX] = alloc.type();
    operands_[index + PAYLOAD_INDEX] = alloc.payload();
#else
    operands_[index] = alloc.value();
#endif
  }
  void setInt64Operand(size_t index, const LInt64Allocation& alloc) {
#if JS_BITS_PER_WORD == 32
    operands_[index + INT64LOW_INDEX] = alloc.low();
    operands_[index + INT64HIGH_INDEX] = alloc.high();
#else
    operands_[index] = alloc.value();
#endif
  }
  const LInt64Allocation getInt64Operand(size_t offset) {
#if JS_BITS_PER_WORD == 32
    return LInt64Allocation(operands_[offset + INT64HIGH_INDEX],
                            operands_[offset + INT64LOW_INDEX]);
#else
    return LInt64Allocation(operands_[offset]);
#endif
  }
};
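
// Illustrative sketch (LFoo is hypothetical, not an op declared by
// LIR_OPCODE_LIST): a concrete LIR instruction with one def, two operands and
// no temps is typically declared as
//   class LFoo : public LInstructionHelper<1, 2, 0> {
//    public:
//     LIR_HEADER(Foo)  // assumes a matching Opcode::Foo exists
//     LFoo() : LInstructionHelper(classOpcode) {}
//   };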

template <size_t Defs, size_t Temps>
class LVariadicInstruction
    : public details::LInstructionFixedDefsTempsHelper<Defs, Temps> {
 protected:
  LVariadicInstruction(LNode::Opcode opcode, size_t numOperands)
      : details::LInstructionFixedDefsTempsHelper<Defs, Temps>(opcode,
                                                               numOperands) {}

 public:
  void setBoxOperand(size_t index, const LBoxAllocation& a) {
#ifdef JS_NUNBOX32
    this->setOperand(index + TYPE_INDEX, a.type());
    this->setOperand(index + PAYLOAD_INDEX, a.payload());
#else
    this->setOperand(index, a.value());
#endif
  }
};

template <size_t Defs, size_t Operands, size_t Temps>
class LCallInstructionHelper
    : public LInstructionHelper<Defs, Operands, Temps> {
 protected:
  explicit LCallInstructionHelper(LNode::Opcode opcode)
      : LInstructionHelper<Defs, Operands, Temps>(opcode) {
    this->setIsCall();
  }
};

template <size_t Defs, size_t Temps>
class LBinaryCallInstructionHelper
    : public LCallInstructionHelper<Defs, 2, Temps> {
 protected:
  explicit LBinaryCallInstructionHelper(LNode::Opcode opcode)
      : LCallInstructionHelper<Defs, 2, Temps>(opcode) {}

 public:
  const LAllocation* lhs() { return this->getOperand(0); }
  const LAllocation* rhs() { return this->getOperand(1); }
};

class LRecoverInfo : public TempObject {
 public:
  typedef Vector<MNode*, 2, JitAllocPolicy> Instructions;

 private:
  // List of instructions needed to recover the stack frames.
  // Outer frames are stored before inner frames.
  Instructions instructions_;

  // Cached offset where this resume point is encoded.
  RecoverOffset recoverOffset_;

  explicit LRecoverInfo(TempAllocator& alloc);
  [[nodiscard]] bool init(MResumePoint* mir);

  // Fill the instruction vector such that all instructions needed for the
  // recovery are pushed before the current instruction.
  template <typename Node>
  [[nodiscard]] bool appendOperands(Node* ins);
  [[nodiscard]] bool appendDefinition(MDefinition* def);
  [[nodiscard]] bool appendResumePoint(MResumePoint* rp);

 public:
  static LRecoverInfo* New(MIRGenerator* gen, MResumePoint* mir);

  // Resume point of the innermost function.
  MResumePoint* mir() const { return instructions_.back()->toResumePoint(); }
  RecoverOffset recoverOffset() const { return recoverOffset_; }
  void setRecoverOffset(RecoverOffset offset) {
    MOZ_ASSERT(recoverOffset_ == INVALID_RECOVER_OFFSET);
    recoverOffset_ = offset;
  }

  MNode** begin() { return instructions_.begin(); }
  MNode** end() { return instructions_.end(); }
  size_t numInstructions() const { return instructions_.length(); }

  class OperandIter {
   private:
    MNode** it_;
    MNode** end_;
    size_t op_;
    size_t opEnd_;
    MResumePoint* rp_;
    MNode* node_;

   public:
    explicit OperandIter(LRecoverInfo* recoverInfo)
        : it_(recoverInfo->begin()),
          end_(recoverInfo->end()),
          op_(0),
          opEnd_(0),
          rp_(nullptr),
          node_(nullptr) {
      settle();
    }

    void settle() {
      opEnd_ = (*it_)->numOperands();
      while (opEnd_ == 0) {
        ++it_;
        op_ = 0;
        opEnd_ = (*it_)->numOperands();
      }
      node_ = *it_;
      if (node_->isResumePoint()) {
        rp_ = node_->toResumePoint();
      }
    }

    MDefinition* operator*() {
      if (rp_) {  // de-virtualize MResumePoint::getOperand calls.
        return rp_->getOperand(op_);
      }
      return node_->getOperand(op_);
    }
    MDefinition* operator->() {
      if (rp_) {  // de-virtualize MResumePoint::getOperand calls.
        return rp_->getOperand(op_);
      }
      return node_->getOperand(op_);
    }

    OperandIter& operator++() {
      ++op_;
      if (op_ != opEnd_) {
        return *this;
      }
      op_ = 0;
      ++it_;
      node_ = rp_ = nullptr;
      if (!*this) {
        settle();
      }
      return *this;
    }

    explicit operator bool() const { return it_ == end_; }

#ifdef DEBUG
    bool canOptimizeOutIfUnused();
#endif
  };
};
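
// Usage sketch for OperandIter: operator bool above returns true when
// iteration is *done* (it_ == end_), so a traversal over an
// LRecoverInfo* recoverInfo reads:
//   for (LRecoverInfo::OperandIter it(recoverInfo); !it; ++it) {
//     MDefinition* def = *it;
//     ...
//   }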

// An LSnapshot is the reflection of an MResumePoint in LIR. Unlike
// MResumePoints, they cannot be shared, as they are filled in by the register
// allocator in order to capture the precise low-level stack state in between an
// instruction's input and output. During code generation, LSnapshots are
// compressed and saved in the compiled script.
class LSnapshot : public TempObject {
 private:
  LAllocation* slots_;
  LRecoverInfo* recoverInfo_;
  SnapshotOffset snapshotOffset_;
  uint32_t numSlots_;
  BailoutId bailoutId_;
  BailoutKind bailoutKind_;

  LSnapshot(LRecoverInfo* recover, BailoutKind kind);
  [[nodiscard]] bool init(MIRGenerator* gen);

 public:
  static LSnapshot* New(MIRGenerator* gen, LRecoverInfo* recover,
                        BailoutKind kind);

  size_t numEntries() const { return numSlots_; }
  size_t numSlots() const { return numSlots_ / BOX_PIECES; }
  LAllocation* payloadOfSlot(size_t i) {
    MOZ_ASSERT(i < numSlots());
    size_t entryIndex = (i * BOX_PIECES) + (BOX_PIECES - 1);
    return getEntry(entryIndex);
  }
#ifdef JS_NUNBOX32
  LAllocation* typeOfSlot(size_t i) {
    MOZ_ASSERT(i < numSlots());
    size_t entryIndex = (i * BOX_PIECES) + (BOX_PIECES - 2);
    return getEntry(entryIndex);
  }
#endif
  LAllocation* getEntry(size_t i) {
    MOZ_ASSERT(i < numSlots_);
    return &slots_[i];
  }
  void setEntry(size_t i, const LAllocation& alloc) {
    MOZ_ASSERT(i < numSlots_);
    slots_[i] = alloc;
  }
  LRecoverInfo* recoverInfo() const { return recoverInfo_; }
  MResumePoint* mir() const { return recoverInfo()->mir(); }
  SnapshotOffset snapshotOffset() const { return snapshotOffset_; }
  BailoutId bailoutId() const { return bailoutId_; }
  void setSnapshotOffset(SnapshotOffset offset) {
    MOZ_ASSERT(snapshotOffset_ == INVALID_SNAPSHOT_OFFSET);
    snapshotOffset_ = offset;
  }
  void setBailoutId(BailoutId id) {
    MOZ_ASSERT(bailoutId_ == INVALID_BAILOUT_ID);
    bailoutId_ = id;
  }
  BailoutKind bailoutKind() const { return bailoutKind_; }
  void rewriteRecoveredInput(LUse input);
};
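
// Entry layout sketch for LSnapshot above: each slot occupies BOX_PIECES
// consecutive entries. On nunbox (32-bit) systems that is a (type, payload)
// pair, so slot i's type is entry 2 * i and its payload entry 2 * i + 1; on
// punbox (64-bit) systems BOX_PIECES is 1 and the payload is the only entry.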
1378 
1379 struct SafepointSlotEntry {
1380   // Flag indicating whether this is a slot in the stack or argument space.
1381   uint32_t stack : 1;
1382 
1383   // Byte offset of the slot, as in LStackSlot or LArgument.
1384   uint32_t slot : 31;
1385 
SafepointSlotEntrySafepointSlotEntry1386   SafepointSlotEntry() : stack(0), slot(0) {}
SafepointSlotEntrySafepointSlotEntry1387   SafepointSlotEntry(bool stack, uint32_t slot) : stack(stack), slot(slot) {}
SafepointSlotEntrySafepointSlotEntry1388   explicit SafepointSlotEntry(const LAllocation* a)
1389       : stack(a->isStackSlot()), slot(a->memorySlot()) {}
1390 };
1391 
1392 struct SafepointNunboxEntry {
1393   uint32_t typeVreg;
1394   LAllocation type;
1395   LAllocation payload;
1396 
SafepointNunboxEntrySafepointNunboxEntry1397   SafepointNunboxEntry() : typeVreg(0) {}
SafepointNunboxEntrySafepointNunboxEntry1398   SafepointNunboxEntry(uint32_t typeVreg, LAllocation type, LAllocation payload)
1399       : typeVreg(typeVreg), type(type), payload(payload) {}
1400 };
1401 
1402 class LSafepoint : public TempObject {
1403   using SlotEntry = SafepointSlotEntry;
1404   using NunboxEntry = SafepointNunboxEntry;
1405 
1406  public:
1407   typedef Vector<SlotEntry, 0, JitAllocPolicy> SlotList;
1408   typedef Vector<NunboxEntry, 0, JitAllocPolicy> NunboxList;
1409 
1410  private:
1411   // The information in a safepoint describes the registers and gc related
1412   // values that are live at the start of the associated instruction.
1413 
1414   // The set of registers which are live at an OOL call made within the
1415   // instruction. This includes any registers for inputs which are not
1416   // use-at-start, any registers for temps, and any registers live after the
1417   // call except outputs of the instruction.
1418   //
1419   // For call instructions, the live regs are empty. Call instructions may
1420   // have register inputs or temporaries, which will *not* be in the live
1421   // registers: if passed to the call, the values passed will be marked via
1422   // TraceJitExitFrame, and no registers can be live after the instruction
1423   // except its outputs.
1424   LiveRegisterSet liveRegs_;
1425 
1426   // The subset of liveRegs which contains gcthing pointers.
1427   LiveGeneralRegisterSet gcRegs_;
1428 
1429 #ifdef CHECK_OSIPOINT_REGISTERS
1430   // Clobbered regs of the current instruction. This set is never written to
1431   // the safepoint; it's only used by assertions during compilation.
1432   LiveRegisterSet clobberedRegs_;
1433 #endif
1434 
1435   // Offset to a position in the safepoint stream, or
1436   // INVALID_SAFEPOINT_OFFSET.
1437   uint32_t safepointOffset_;
1438 
1439   // Assembler buffer displacement to OSI point's call location.
1440   uint32_t osiCallPointOffset_;
1441 
1442   // List of slots which have gcthing pointers.
1443   SlotList gcSlots_;
1444 
1445 #ifdef JS_NUNBOX32
1446   // List of registers (in liveRegs) and slots which contain pieces of Values.
1447   NunboxList nunboxParts_;
1448 #elif JS_PUNBOX64
1449   // List of slots which have Values.
1450   SlotList valueSlots_;
1451 
1452   // The subset of liveRegs which have Values.
1453   LiveGeneralRegisterSet valueRegs_;
1454 #endif
1455 
1456   // The subset of liveRegs which contains pointers to slots/elements.
1457   LiveGeneralRegisterSet slotsOrElementsRegs_;
1458 
1459   // List of slots which have slots/elements pointers.
1460   SlotList slotsOrElementsSlots_;
1461 
1462   // Wasm only: with what kind of instruction is this LSafepoint associated?
1463   // true => wasm trap, false => wasm call.
1464   bool isWasmTrap_;
1465 
  // Wasm only: what is the value of masm.framePushed() that corresponds to
  // the lowest-addressed word covered by the StackMap that we will generate
  // from this LSafepoint?  This depends on the instruction:
  //
  // if isWasmTrap_ == true:
  //    masm.framePushed() unmodified.  Note that when constructing the
  //    StackMap we will add entries below this point to take account of
  //    registers dumped on the stack as a result of the trap.
  //
  // if isWasmTrap_ == false:
  //    masm.framePushed() - StackArgAreaSizeUnaligned(arg types for the call),
  //    because the map does not include the outgoing args themselves, but
  //    it does cover any and all alignment space above them.
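  //
  // Worked example (hypothetical numbers, for illustration only): for a
  // wasm call with masm.framePushed() == 64 and 12 bytes of outgoing args
  // (StackArgAreaSizeUnaligned == 12), the stack map base would be
  // 64 - 12 = 52, so the map covers the frame above the outgoing args,
  // including any alignment padding.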
  uint32_t framePushedAtStackMapBase_;

 public:
  void assertInvariants() {
    // Every register in valueRegs and gcRegs should also be in liveRegs.
#ifndef JS_NUNBOX32
    MOZ_ASSERT((valueRegs().bits() & ~liveRegs().gprs().bits()) == 0);
#endif
    MOZ_ASSERT((gcRegs().bits() & ~liveRegs().gprs().bits()) == 0);
  }

  explicit LSafepoint(TempAllocator& alloc)
      : safepointOffset_(INVALID_SAFEPOINT_OFFSET),
        osiCallPointOffset_(0),
        gcSlots_(alloc),
#ifdef JS_NUNBOX32
        nunboxParts_(alloc),
#else
        valueSlots_(alloc),
#endif
        slotsOrElementsSlots_(alloc),
        isWasmTrap_(false),
        framePushedAtStackMapBase_(0) {
    assertInvariants();
  }
  void addLiveRegister(AnyRegister reg) {
    liveRegs_.addUnchecked(reg);
    assertInvariants();
  }
  const LiveRegisterSet& liveRegs() const { return liveRegs_; }
#ifdef CHECK_OSIPOINT_REGISTERS
  void addClobberedRegister(AnyRegister reg) {
    clobberedRegs_.addUnchecked(reg);
    assertInvariants();
  }
  const LiveRegisterSet& clobberedRegs() const { return clobberedRegs_; }
#endif
  void addGcRegister(Register reg) {
    gcRegs_.addUnchecked(reg);
    assertInvariants();
  }
  LiveGeneralRegisterSet gcRegs() const { return gcRegs_; }
  [[nodiscard]] bool addGcSlot(bool stack, uint32_t slot) {
    bool result = gcSlots_.append(SlotEntry(stack, slot));
    if (result) {
      assertInvariants();
    }
    return result;
  }
  SlotList& gcSlots() { return gcSlots_; }

  SlotList& slotsOrElementsSlots() { return slotsOrElementsSlots_; }
  LiveGeneralRegisterSet slotsOrElementsRegs() const {
    return slotsOrElementsRegs_;
  }
  void addSlotsOrElementsRegister(Register reg) {
    slotsOrElementsRegs_.addUnchecked(reg);
    assertInvariants();
  }
  [[nodiscard]] bool addSlotsOrElementsSlot(bool stack, uint32_t slot) {
    bool result = slotsOrElementsSlots_.append(SlotEntry(stack, slot));
    if (result) {
      assertInvariants();
    }
    return result;
  }
  [[nodiscard]] bool addSlotsOrElementsPointer(LAllocation alloc) {
    if (alloc.isMemory()) {
      return addSlotsOrElementsSlot(alloc.isStackSlot(), alloc.memorySlot());
    }
    MOZ_ASSERT(alloc.isRegister());
    addSlotsOrElementsRegister(alloc.toRegister().gpr());
    assertInvariants();
    return true;
  }
  bool hasSlotsOrElementsPointer(LAllocation alloc) const {
    if (alloc.isRegister()) {
      return slotsOrElementsRegs().has(alloc.toRegister().gpr());
    }
    for (size_t i = 0; i < slotsOrElementsSlots_.length(); i++) {
      const SlotEntry& entry = slotsOrElementsSlots_[i];
      if (entry.stack == alloc.isStackSlot() &&
          entry.slot == alloc.memorySlot()) {
        return true;
      }
    }
    return false;
  }

  [[nodiscard]] bool addGcPointer(LAllocation alloc) {
    if (alloc.isMemory()) {
      return addGcSlot(alloc.isStackSlot(), alloc.memorySlot());
    }
    if (alloc.isRegister()) {
      addGcRegister(alloc.toRegister().gpr());
    }
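    // Note: a constant allocation is neither memory nor a register, so it
    // falls through both tests above and is not recorded in the safepoint.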
    assertInvariants();
    return true;
  }

  bool hasGcPointer(LAllocation alloc) const {
    if (alloc.isRegister()) {
      return gcRegs().has(alloc.toRegister().gpr());
    }
    MOZ_ASSERT(alloc.isMemory());
    for (size_t i = 0; i < gcSlots_.length(); i++) {
      if (gcSlots_[i].stack == alloc.isStackSlot() &&
          gcSlots_[i].slot == alloc.memorySlot()) {
        return true;
      }
    }
    return false;
  }

  // Return true if all GC-managed pointers from `alloc` are recorded in this
  // safepoint.
  bool hasAllGcPointersFromStackArea(LAllocation alloc) const {
    for (LStackArea::ResultIterator iter = alloc.toStackArea()->results(); iter;
         iter.next()) {
      if (iter.isGcPointer() && !hasGcPointer(iter.alloc())) {
        return false;
      }
    }
    return true;
  }

#ifdef JS_NUNBOX32
  [[nodiscard]] bool addNunboxParts(uint32_t typeVreg, LAllocation type,
                                    LAllocation payload) {
    bool result = nunboxParts_.append(NunboxEntry(typeVreg, type, payload));
    if (result) {
      assertInvariants();
    }
    return result;
  }

  [[nodiscard]] bool addNunboxType(uint32_t typeVreg, LAllocation type) {
    for (size_t i = 0; i < nunboxParts_.length(); i++) {
      if (nunboxParts_[i].type == type) {
        return true;
      }
      if (nunboxParts_[i].type == LUse(typeVreg, LUse::ANY)) {
        nunboxParts_[i].type = type;
        return true;
      }
    }

    // Vregs for nunbox pairs are adjacent, with the type coming first.
    uint32_t payloadVreg = typeVreg + 1;
    bool result = nunboxParts_.append(
        NunboxEntry(typeVreg, type, LUse(payloadVreg, LUse::ANY)));
    if (result) {
      assertInvariants();
    }
    return result;
  }
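  // Illustrative example of the adjacency convention above: if a boxed
  // Value's type half lives in vreg 10, its payload half lives in vreg 11,
  // so a partial entry for either half can name its counterpart without any
  // extra bookkeeping.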

  [[nodiscard]] bool addNunboxPayload(uint32_t payloadVreg,
                                      LAllocation payload) {
    for (size_t i = 0; i < nunboxParts_.length(); i++) {
      if (nunboxParts_[i].payload == payload) {
        return true;
      }
      if (nunboxParts_[i].payload == LUse(payloadVreg, LUse::ANY)) {
        nunboxParts_[i].payload = payload;
        return true;
      }
    }

    // Vregs for nunbox pairs are adjacent, with the type coming first.
    uint32_t typeVreg = payloadVreg - 1;
    bool result = nunboxParts_.append(
        NunboxEntry(typeVreg, LUse(typeVreg, LUse::ANY), payload));
    if (result) {
      assertInvariants();
    }
    return result;
  }

  LAllocation findTypeAllocation(uint32_t typeVreg) {
    // Look for some allocation for the specified type vreg, to go with a
    // partial nunbox entry for the payload. Note that we don't need to
    // look at the value slots in the safepoint, as these aren't used by
    // register allocators which add partial nunbox entries.
    for (size_t i = 0; i < nunboxParts_.length(); i++) {
      if (nunboxParts_[i].typeVreg == typeVreg &&
          !nunboxParts_[i].type.isUse()) {
        return nunboxParts_[i].type;
      }
    }
    return LUse(typeVreg, LUse::ANY);
  }

#  ifdef DEBUG
  bool hasNunboxPayload(LAllocation payload) const {
    for (size_t i = 0; i < nunboxParts_.length(); i++) {
      if (nunboxParts_[i].payload == payload) {
        return true;
      }
    }
    return false;
  }
#  endif

  NunboxList& nunboxParts() { return nunboxParts_; }

#elif defined(JS_PUNBOX64)
  [[nodiscard]] bool addValueSlot(bool stack, uint32_t slot) {
    bool result = valueSlots_.append(SlotEntry(stack, slot));
    if (result) {
      assertInvariants();
    }
    return result;
  }
  SlotList& valueSlots() { return valueSlots_; }

  bool hasValueSlot(bool stack, uint32_t slot) const {
    for (size_t i = 0; i < valueSlots_.length(); i++) {
      if (valueSlots_[i].stack == stack && valueSlots_[i].slot == slot) {
        return true;
      }
    }
    return false;
  }

  void addValueRegister(Register reg) {
    valueRegs_.add(reg);
    assertInvariants();
  }
  LiveGeneralRegisterSet valueRegs() const { return valueRegs_; }

  [[nodiscard]] bool addBoxedValue(LAllocation alloc) {
    if (alloc.isRegister()) {
      Register reg = alloc.toRegister().gpr();
      if (!valueRegs().has(reg)) {
        addValueRegister(reg);
      }
      return true;
    }
    if (hasValueSlot(alloc.isStackSlot(), alloc.memorySlot())) {
      return true;
    }
    return addValueSlot(alloc.isStackSlot(), alloc.memorySlot());
  }

  bool hasBoxedValue(LAllocation alloc) const {
    if (alloc.isRegister()) {
      return valueRegs().has(alloc.toRegister().gpr());
    }
    return hasValueSlot(alloc.isStackSlot(), alloc.memorySlot());
  }

#endif  // JS_PUNBOX64

  bool encoded() const { return safepointOffset_ != INVALID_SAFEPOINT_OFFSET; }
  uint32_t offset() const {
    MOZ_ASSERT(encoded());
    return safepointOffset_;
  }
  void setOffset(uint32_t offset) { safepointOffset_ = offset; }
  uint32_t osiReturnPointOffset() const {
    // In general, pointer arithmetic on code is bad, but in this case it is
    // how we recover the return address of the OSI call: we add the fixed
    // size of a near call to the call's offset. Computing it by stepping
    // over constant pools in the instruction stream would give the wrong
    // answer.
    return osiCallPointOffset_ + Assembler::PatchWrite_NearCallSize();
  }
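  // Example for osiReturnPointOffset() above (illustrative sizes only): if
  // the OSI call is at buffer offset 100 and a near call encodes in 5
  // bytes, the return point is at offset 105.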
  uint32_t osiCallPointOffset() const { return osiCallPointOffset_; }
  void setOsiCallPointOffset(uint32_t osiCallPointOffset) {
    MOZ_ASSERT(!osiCallPointOffset_);
    osiCallPointOffset_ = osiCallPointOffset;
  }

  bool isWasmTrap() const { return isWasmTrap_; }
  void setIsWasmTrap() { isWasmTrap_ = true; }

  uint32_t framePushedAtStackMapBase() const {
    return framePushedAtStackMapBase_;
  }
  void setFramePushedAtStackMapBase(uint32_t n) {
    MOZ_ASSERT(framePushedAtStackMapBase_ == 0);
    framePushedAtStackMapBase_ = n;
  }
};

class LInstruction::InputIterator {
 private:
  LInstruction& ins_;
  size_t idx_;
  bool snapshot_;

  void handleOperandsEnd() {
    // Switch to iterating over the snapshot once all operands have been
    // visited.
    if (!snapshot_ && idx_ == ins_.numOperands() && ins_.snapshot()) {
      idx_ = 0;
      snapshot_ = true;
    }
  }

 public:
  explicit InputIterator(LInstruction& ins)
      : ins_(ins), idx_(0), snapshot_(false) {
    handleOperandsEnd();
  }

  bool more() const {
    if (snapshot_) {
      return idx_ < ins_.snapshot()->numEntries();
    }
    if (idx_ < ins_.numOperands()) {
      return true;
    }
    if (ins_.snapshot() && ins_.snapshot()->numEntries()) {
      return true;
    }
    return false;
  }

  bool isSnapshotInput() const { return snapshot_; }

  void next() {
    MOZ_ASSERT(more());
    idx_++;
    handleOperandsEnd();
  }

  void replace(const LAllocation& alloc) {
    if (snapshot_) {
      ins_.snapshot()->setEntry(idx_, alloc);
    } else {
      ins_.setOperand(idx_, alloc);
    }
  }

  LAllocation* operator*() const {
    if (snapshot_) {
      return ins_.snapshot()->getEntry(idx_);
    }
    return ins_.getOperand(idx_);
  }

  LAllocation* operator->() const { return **this; }
};
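
// Typical use of LInstruction::InputIterator (a sketch, not code from this
// file): walk every input allocation of an instruction, visiting operands
// first and then any snapshot entries.
//
//   for (LInstruction::InputIterator iter(*ins); iter.more(); iter.next()) {
//     LAllocation* a = *iter;
//     if (a->isUse()) {
//       // Inspect the allocation, or rewrite it via iter.replace(...).
//     }
//   }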

class LIRGraph {
  struct ValueHasher {
    using Lookup = Value;
    static HashNumber hash(const Value& v) { return HashNumber(v.asRawBits()); }
    static bool match(const Value& lhs, const Value& rhs) { return lhs == rhs; }
  };

  FixedList<LBlock> blocks_;

  // constantPool_ is a mozilla::Vector, not a js::Vector, because
  // js::Vector<Value> is prohibited as unsafe. This particular Vector of
  // Values is safe because it is only used within the scope of an
  // AutoSuppressGC (in IonCompile), which inhibits GC.
  mozilla::Vector<Value, 0, JitAllocPolicy> constantPool_;
  typedef HashMap<Value, uint32_t, ValueHasher, JitAllocPolicy> ConstantPoolMap;
  ConstantPoolMap constantPoolMap_;
  Vector<LInstruction*, 0, JitAllocPolicy> safepoints_;
  Vector<LInstruction*, 0, JitAllocPolicy> nonCallSafepoints_;
  uint32_t numVirtualRegisters_;
  uint32_t numInstructions_;

  // Number of stack slots needed for local spills.
  uint32_t localSlotCount_;
  // Number of stack slots needed for argument construction for calls.
  uint32_t argumentSlotCount_;

  MIRGraph& mir_;

 public:
  explicit LIRGraph(MIRGraph* mir);

  [[nodiscard]] bool init() {
    return blocks_.init(mir_.alloc(), mir_.numBlocks());
  }
  MIRGraph& mir() const { return mir_; }
  size_t numBlocks() const { return blocks_.length(); }
  LBlock* getBlock(size_t i) { return &blocks_[i]; }
  uint32_t numBlockIds() const { return mir_.numBlockIds(); }
  [[nodiscard]] bool initBlock(MBasicBlock* mir) {
    auto* block = &blocks_[mir->id()];
    auto* lir = new (block) LBlock(mir);
    return lir->init(mir_.alloc());
  }
  uint32_t getVirtualRegister() {
    numVirtualRegisters_ += VREG_INCREMENT;
    return numVirtualRegisters_;
  }
  uint32_t numVirtualRegisters() const {
    // Virtual registers are 1-based, not 0-based, so add one as a
    // convenience for 0-based arrays.
    return numVirtualRegisters_ + 1;
  }
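  // Example for numVirtualRegisters() above (illustrative): if three vregs
  // have been allocated, they are numbered 1..3 and this returns 4, so an
  // array of that length can be indexed directly by vreg id, leaving index
  // 0 unused.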
  uint32_t getInstructionId() { return numInstructions_++; }
  uint32_t numInstructions() const { return numInstructions_; }
  void setLocalSlotCount(uint32_t localSlotCount) {
    localSlotCount_ = localSlotCount;
  }
  uint32_t localSlotCount() const { return localSlotCount_; }
  // Return the localSlotCount() value rounded up so that it satisfies the
  // platform stack alignment requirement, and so that it's a multiple of
  // the number of slots per Value.
  uint32_t paddedLocalSlotCount() const {
    // Round to JitStackAlignment, and implicitly to sizeof(Value) as
    // JitStackAlignment is a multiple of sizeof(Value). These alignments
    // are needed for spilling SIMD registers properly, and for
    // StackOffsetOfPassedArg which rounds argument slots to 8-byte
    // boundaries.
    return AlignBytes(localSlotCount(), JitStackAlignment);
  }
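  // Worked example for paddedLocalSlotCount() above (assuming a
  // hypothetical JitStackAlignment of 16): a localSlotCount() of 37 is
  // rounded up to AlignBytes(37, 16) == 48.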
  size_t paddedLocalSlotsSize() const { return paddedLocalSlotCount(); }
  void setArgumentSlotCount(uint32_t argumentSlotCount) {
    argumentSlotCount_ = argumentSlotCount;
  }
  uint32_t argumentSlotCount() const { return argumentSlotCount_; }
  size_t argumentsSize() const { return argumentSlotCount() * sizeof(Value); }
  uint32_t totalSlotCount() const {
    return paddedLocalSlotCount() + argumentsSize();
  }
  [[nodiscard]] bool addConstantToPool(const Value& v, uint32_t* index);
  size_t numConstants() const { return constantPool_.length(); }
  Value* constantPool() { return &constantPool_[0]; }

  bool noteNeedsSafepoint(LInstruction* ins);
  size_t numNonCallSafepoints() const { return nonCallSafepoints_.length(); }
  LInstruction* getNonCallSafepoint(size_t i) const {
    return nonCallSafepoints_[i];
  }
  size_t numSafepoints() const { return safepoints_.length(); }
  LInstruction* getSafepoint(size_t i) const { return safepoints_[i]; }

#ifdef JS_JITSPEW
  void dump(GenericPrinter& out);
  void dump();
#endif
};

LAllocation::LAllocation(AnyRegister reg) {
  if (reg.isFloat()) {
    *this = LFloatReg(reg.fpu());
  } else {
    *this = LGeneralReg(reg.gpr());
  }
}

AnyRegister LAllocation::toRegister() const {
  MOZ_ASSERT(isRegister());
  if (isFloatReg()) {
    return AnyRegister(toFloatReg()->reg());
  }
  return AnyRegister(toGeneralReg()->reg());
}

}  // namespace jit
}  // namespace js

#include "jit/shared/LIR-shared.h"
#if defined(JS_CODEGEN_X86) || defined(JS_CODEGEN_X64)
#  if defined(JS_CODEGEN_X86)
#    include "jit/x86/LIR-x86.h"
#  elif defined(JS_CODEGEN_X64)
#    include "jit/x64/LIR-x64.h"
#  endif
#  include "jit/x86-shared/LIR-x86-shared.h"
#elif defined(JS_CODEGEN_ARM)
#  include "jit/arm/LIR-arm.h"
#elif defined(JS_CODEGEN_ARM64)
#  include "jit/arm64/LIR-arm64.h"
#elif defined(JS_CODEGEN_LOONG64)
#  include "jit/loong64/LIR-loong64.h"
#elif defined(JS_CODEGEN_MIPS32) || defined(JS_CODEGEN_MIPS64)
#  if defined(JS_CODEGEN_MIPS32)
#    include "jit/mips32/LIR-mips32.h"
#  elif defined(JS_CODEGEN_MIPS64)
#    include "jit/mips64/LIR-mips64.h"
#  endif
#  include "jit/mips-shared/LIR-mips-shared.h"
#elif defined(JS_CODEGEN_NONE)
#  include "jit/none/LIR-none.h"
#else
#  error "Unknown architecture!"
#endif

#undef LIR_HEADER

namespace js {
namespace jit {

#define LIROP(name)                           \
  L##name* LNode::to##name() {                \
    MOZ_ASSERT(is##name());                   \
    return static_cast<L##name*>(this);       \
  }                                           \
  const L##name* LNode::to##name() const {    \
    MOZ_ASSERT(is##name());                   \
    return static_cast<const L##name*>(this); \
  }
LIR_OPCODE_LIST(LIROP)
#undef LIROP
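
// For reference, LIROP(Phi) expands to the pair of checked downcasts
// LPhi* LNode::toPhi() and const LPhi* LNode::toPhi() const; the same pair
// is generated for every opcode in LIR_OPCODE_LIST.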

#define LALLOC_CAST(type)               \
  L##type* LAllocation::to##type() {    \
    MOZ_ASSERT(is##type());             \
    return static_cast<L##type*>(this); \
  }
#define LALLOC_CONST_CAST(type)                  \
  const L##type* LAllocation::to##type() const { \
    MOZ_ASSERT(is##type());                      \
    return static_cast<const L##type*>(this);    \
  }

LALLOC_CAST(Use)
LALLOC_CONST_CAST(Use)
LALLOC_CONST_CAST(GeneralReg)
LALLOC_CONST_CAST(FloatReg)
LALLOC_CONST_CAST(StackSlot)
LALLOC_CAST(StackArea)
LALLOC_CONST_CAST(StackArea)
LALLOC_CONST_CAST(Argument)
LALLOC_CONST_CAST(ConstantIndex)

#undef LALLOC_CAST
#undef LALLOC_CONST_CAST

}  // namespace jit
}  // namespace js

#endif /* jit_LIR_h */