1 /* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
2  * vim: set ts=8 sts=4 et sw=4 tw=99:
3  * This Source Code Form is subject to the terms of the Mozilla Public
4  * License, v. 2.0. If a copy of the MPL was not distributed with this
5  * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
6 
7 #ifndef jit_LIR_h
8 #define jit_LIR_h
9 
10 // This file declares the core data structures for LIR: storage allocations for
11 // inputs and outputs, as well as the interface instructions must conform to.
12 
13 #include "mozilla/Array.h"
14 
15 #include "jit/Bailouts.h"
16 #include "jit/FixedList.h"
17 #include "jit/InlineList.h"
18 #include "jit/JitAllocPolicy.h"
19 #include "jit/LOpcodes.h"
20 #include "jit/MIR.h"
21 #include "jit/MIRGraph.h"
22 #include "jit/Registers.h"
23 #include "jit/Safepoints.h"
24 
25 namespace js {
26 namespace jit {
27 
28 class LUse;
29 class LGeneralReg;
30 class LFloatReg;
31 class LStackSlot;
32 class LArgument;
33 class LConstantIndex;
34 class MBasicBlock;
35 class MIRGenerator;
36 
// Step between successively assigned virtual register ids.
// NOTE(review): presumably the allocator's vreg id stride — confirm at use
// sites.
static const uint32_t VREG_INCREMENT = 1;

// Argument slot reserved for the implicit |this| value.
// NOTE(review): inferred from the name — confirm against frame layout code.
static const uint32_t THIS_FRAME_ARGSLOT = 0;

#if defined(JS_NUNBOX32)
// On 32-bit (nunbox) targets a boxed Value needs two allocation pieces:
// one for the type tag and one for the payload.
#define BOX_PIECES 2
static const uint32_t VREG_TYPE_OFFSET = 0;
static const uint32_t VREG_DATA_OFFSET = 1;
static const uint32_t TYPE_INDEX = 0;
static const uint32_t PAYLOAD_INDEX = 1;
static const uint32_t INT64LOW_INDEX = 0;
static const uint32_t INT64HIGH_INDEX = 1;
#elif defined(JS_PUNBOX64)
// On 64-bit (punbox) targets a boxed Value fits in one allocation piece.
#define BOX_PIECES 1
#else
#error "Unknown!"
#endif

// Number of pointer-sized pieces needed to hold an int64 (2 on 32-bit
// targets, 1 on 64-bit targets).
static const uint32_t INT64_PIECES = sizeof(int64_t) / sizeof(uintptr_t);
56 
// Represents storage for an operand. For constants, the pointer is tagged
// with a single bit, and the untagged pointer is a pointer to a Value.
class LAllocation : public TempObject {
  // Tagged word. For CONSTANT_VALUE this holds an MConstant* whose low
  // KIND_BITS are zero (guaranteed by alignment); for every other Kind the
  // low bits hold the Kind and the bits above hold kind-specific data.
  uintptr_t bits_;

  // 3 bits gives us enough for an interesting set of Kinds and also fits
  // within the alignment bits of pointers to Value, which are always
  // 8-byte aligned.
  static const uintptr_t KIND_BITS = 3;
  static const uintptr_t KIND_SHIFT = 0;
  static const uintptr_t KIND_MASK = (1 << KIND_BITS) - 1;

 protected:
  // Payload bits available above the kind tag within a 32-bit word.
  static const uintptr_t DATA_BITS = (sizeof(uint32_t) * 8) - KIND_BITS;
  static const uintptr_t DATA_SHIFT = KIND_SHIFT + KIND_BITS;

 public:
  enum Kind {
    CONSTANT_VALUE,  // MConstant*.
    CONSTANT_INDEX,  // Constant arbitrary index.
    USE,         // Use of a virtual register, with physical allocation policy.
    GPR,         // General purpose register.
    FPU,         // Floating-point register.
    STACK_SLOT,  // Stack slot.
    ARGUMENT_SLOT  // Argument slot.
  };

  static const uintptr_t DATA_MASK = (1 << DATA_BITS) - 1;

 protected:
  // Extracts the kind-specific payload stored above the tag bits.
  uint32_t data() const { return uint32_t(bits_) >> DATA_SHIFT; }
  // Replaces the payload while preserving the kind tag.
  void setData(uint32_t data) {
    MOZ_ASSERT(data <= DATA_MASK);
    bits_ &= ~(DATA_MASK << DATA_SHIFT);
    bits_ |= (data << DATA_SHIFT);
  }
  // Overwrites both the kind tag and the payload in a single store.
  void setKindAndData(Kind kind, uint32_t data) {
    MOZ_ASSERT(data <= DATA_MASK);
    bits_ = (uint32_t(kind) << KIND_SHIFT) | data << DATA_SHIFT;
  }

  LAllocation(Kind kind, uint32_t data) { setKindAndData(kind, data); }
  explicit LAllocation(Kind kind) { setKindAndData(kind, 0); }

 public:
  // A default-constructed allocation is "bogus": all-zero bits.
  LAllocation() : bits_(0) { MOZ_ASSERT(isBogus()); }

  // The MConstant pointer must have its low bits cleared.
  explicit LAllocation(const MConstant* c) {
    MOZ_ASSERT(c);
    bits_ = uintptr_t(c);
    MOZ_ASSERT((bits_ & (KIND_MASK << KIND_SHIFT)) == 0);
    bits_ |= CONSTANT_VALUE << KIND_SHIFT;
  }
  inline explicit LAllocation(AnyRegister reg);

  Kind kind() const { return (Kind)((bits_ >> KIND_SHIFT) & KIND_MASK); }

  // Kind predicates.
  bool isBogus() const { return bits_ == 0; }
  bool isUse() const { return kind() == USE; }
  bool isConstant() const { return isConstantValue() || isConstantIndex(); }
  bool isConstantValue() const { return kind() == CONSTANT_VALUE; }
  bool isConstantIndex() const { return kind() == CONSTANT_INDEX; }
  bool isGeneralReg() const { return kind() == GPR; }
  bool isFloatReg() const { return kind() == FPU; }
  bool isStackSlot() const { return kind() == STACK_SLOT; }
  bool isArgument() const { return kind() == ARGUMENT_SLOT; }
  bool isRegister() const { return isGeneralReg() || isFloatReg(); }
  bool isRegister(bool needFloat) const {
    return needFloat ? isFloatReg() : isGeneralReg();
  }
  bool isMemory() const { return isStackSlot() || isArgument(); }
  inline uint32_t memorySlot() const;

  // Checked downcasts to the concrete allocation subclasses (defined
  // out of line; they assert the matching kind).
  inline LUse* toUse();
  inline const LUse* toUse() const;
  inline const LGeneralReg* toGeneralReg() const;
  inline const LFloatReg* toFloatReg() const;
  inline const LStackSlot* toStackSlot() const;
  inline const LArgument* toArgument() const;
  inline const LConstantIndex* toConstantIndex() const;
  inline AnyRegister toRegister() const;

  // Recovers the MConstant* by stripping the kind tag bits.
  const MConstant* toConstant() const {
    MOZ_ASSERT(isConstantValue());
    return reinterpret_cast<const MConstant*>(bits_ &
                                              ~(KIND_MASK << KIND_SHIFT));
  }

  // Allocations compare and hash by their raw bit pattern.
  bool operator==(const LAllocation& other) const {
    return bits_ == other.bits_;
  }

  bool operator!=(const LAllocation& other) const {
    return bits_ != other.bits_;
  }

  HashNumber hash() const { return bits_; }

  UniqueChars toString() const;
  bool aliases(const LAllocation& other) const;
  void dump() const;
};
159 
// A use of a virtual register. Packs the allocation policy, an optional
// fixed physical register code, a used-at-start flag and the virtual
// register number into the payload bits inherited from LAllocation.
class LUse : public LAllocation {
  static const uint32_t POLICY_BITS = 3;
  static const uint32_t POLICY_SHIFT = 0;
  static const uint32_t POLICY_MASK = (1 << POLICY_BITS) - 1;
  // Physical register code; only meaningful for FIXED-policy uses.
  static const uint32_t REG_BITS = 6;
  static const uint32_t REG_SHIFT = POLICY_SHIFT + POLICY_BITS;
  static const uint32_t REG_MASK = (1 << REG_BITS) - 1;

  // Whether the physical register for this operand may be reused for a def.
  static const uint32_t USED_AT_START_BITS = 1;
  static const uint32_t USED_AT_START_SHIFT = REG_SHIFT + REG_BITS;
  static const uint32_t USED_AT_START_MASK = (1 << USED_AT_START_BITS) - 1;

 public:
  // Virtual registers get the remaining 19 bits.
  static const uint32_t VREG_BITS =
      DATA_BITS - (USED_AT_START_SHIFT + USED_AT_START_BITS);
  static const uint32_t VREG_SHIFT = USED_AT_START_SHIFT + USED_AT_START_BITS;
  static const uint32_t VREG_MASK = (1 << VREG_BITS) - 1;

  enum Policy {
    // Input should be in a read-only register or stack slot.
    ANY,

    // Input must be in a read-only register.
    REGISTER,

    // Input must be in a specific, read-only register.
    FIXED,

    // Keep the used virtual register alive, and use whatever allocation is
    // available. This is similar to ANY but hints to the register allocator
    // that it is never useful to optimize this site.
    KEEPALIVE,

    // For snapshot inputs, indicates that the associated instruction will
    // write this input to its output register before bailing out.
    // The register allocator may thus allocate that output register, and
    // does not need to keep the virtual register alive (alternatively,
    // this may be treated as KEEPALIVE).
    RECOVERED_INPUT
  };

  // Initializes policy, fixed register code and used-at-start flag. The
  // virtual register is set separately via setVirtualRegister().
  void set(Policy policy, uint32_t reg, bool usedAtStart) {
    setKindAndData(USE, (policy << POLICY_SHIFT) | (reg << REG_SHIFT) |
                            ((usedAtStart ? 1 : 0) << USED_AT_START_SHIFT));
  }

 public:
  LUse(uint32_t vreg, Policy policy, bool usedAtStart = false) {
    set(policy, 0, usedAtStart);
    setVirtualRegister(vreg);
  }
  explicit LUse(Policy policy, bool usedAtStart = false) {
    set(policy, 0, usedAtStart);
  }
  // Fixed-register uses: the physical register code is encoded directly.
  explicit LUse(Register reg, bool usedAtStart = false) {
    set(FIXED, reg.code(), usedAtStart);
  }
  explicit LUse(FloatRegister reg, bool usedAtStart = false) {
    set(FIXED, reg.code(), usedAtStart);
  }
  LUse(Register reg, uint32_t virtualRegister, bool usedAtStart = false) {
    set(FIXED, reg.code(), usedAtStart);
    setVirtualRegister(virtualRegister);
  }
  LUse(FloatRegister reg, uint32_t virtualRegister, bool usedAtStart = false) {
    set(FIXED, reg.code(), usedAtStart);
    setVirtualRegister(virtualRegister);
  }

  // Stores the virtual register number, leaving all other fields intact.
  void setVirtualRegister(uint32_t index) {
    MOZ_ASSERT(index < VREG_MASK);

    uint32_t old = data() & ~(VREG_MASK << VREG_SHIFT);
    setData(old | (index << VREG_SHIFT));
  }

  Policy policy() const {
    Policy policy = (Policy)((data() >> POLICY_SHIFT) & POLICY_MASK);
    return policy;
  }
  // Virtual register number; 0 is never a valid vreg here (asserted).
  uint32_t virtualRegister() const {
    uint32_t index = (data() >> VREG_SHIFT) & VREG_MASK;
    MOZ_ASSERT(index != 0);
    return index;
  }
  // Physical register code for FIXED-policy uses.
  uint32_t registerCode() const {
    MOZ_ASSERT(policy() == FIXED);
    return (data() >> REG_SHIFT) & REG_MASK;
  }
  bool isFixedRegister() const { return policy() == FIXED; }
  bool usedAtStart() const {
    return !!((data() >> USED_AT_START_SHIFT) & USED_AT_START_MASK);
  }
};
256 
// Upper bound on virtual register ids, limited by LUse's vreg bitfield.
static const uint32_t MAX_VIRTUAL_REGISTERS = LUse::VREG_MASK;
258 
// The allocation(s) used to store a boxed Value: two pieces (type tag +
// payload) on NUNBOX32 targets, a single piece everywhere else.
class LBoxAllocation {
#ifdef JS_NUNBOX32
  LAllocation type_;
  LAllocation payload_;
#else
  LAllocation value_;
#endif

 public:
#ifdef JS_NUNBOX32
  LBoxAllocation(LAllocation type, LAllocation payload)
      : type_(type), payload_(payload) {}

  LAllocation type() const { return type_; }
  LAllocation payload() const { return payload_; }
#else
  explicit LBoxAllocation(LAllocation value) : value_(value) {}

  LAllocation value() const { return value_; }
#endif
};
280 
// Holder for an int64-sized value built from ValT pieces: on 32-bit targets
// the value is split into high and low word-sized halves; on 64-bit targets
// one piece is enough.
template <class ValT>
class LInt64Value {
#if JS_BITS_PER_WORD == 32
  ValT hi_;
  ValT lo_;
#else
  ValT whole_;
#endif

 public:
#if JS_BITS_PER_WORD == 32
  // Construct from the two word-sized halves of the 64-bit value.
  LInt64Value(ValT high, ValT low) : hi_(high), lo_(low) {}

  // Upper 32 bits.
  ValT high() const { return hi_; }
  // Lower 32 bits.
  ValT low() const { return lo_; }
#else
  // The whole value fits in a single machine word.
  explicit LInt64Value(ValT value) : whole_(value) {}

  ValT value() const { return whole_; }
#endif
};
302 
303 using LInt64Allocation = LInt64Value<LAllocation>;
304 
// An allocation naming a specific general purpose register; the register
// code is stored in the LAllocation payload bits.
class LGeneralReg : public LAllocation {
 public:
  explicit LGeneralReg(Register reg) : LAllocation(GPR, reg.code()) {}

  Register reg() const { return Register::FromCode(data()); }
};
311 
// An allocation naming a specific floating-point register; the register
// code is stored in the LAllocation payload bits.
class LFloatReg : public LAllocation {
 public:
  explicit LFloatReg(FloatRegister reg) : LAllocation(FPU, reg.code()) {}

  FloatRegister reg() const { return FloatRegister::FromCode(data()); }
};
318 
// Arbitrary constant index.
class LConstantIndex : public LAllocation {
  // Private: construction goes through the named FromIndex() factory.
  explicit LConstantIndex(uint32_t index)
      : LAllocation(CONSTANT_INDEX, index) {}

 public:
  static LConstantIndex FromIndex(uint32_t index) {
    return LConstantIndex(index);
  }

  uint32_t index() const { return data(); }
};
331 
// Stack slots are indices into the stack. The indices are byte indices.
class LStackSlot : public LAllocation {
 public:
  explicit LStackSlot(uint32_t slot) : LAllocation(STACK_SLOT, slot) {}

  uint32_t slot() const { return data(); }
};
339 
// Arguments are reverse indices into the stack. The indices are byte indices.
class LArgument : public LAllocation {
 public:
  explicit LArgument(uint32_t index) : LAllocation(ARGUMENT_SLOT, index) {}

  uint32_t index() const { return data(); }
};
347 
memorySlot()348 inline uint32_t LAllocation::memorySlot() const {
349   MOZ_ASSERT(isMemory());
350   return isStackSlot() ? toStackSlot()->slot() : toArgument()->index();
351 }
352 
// Represents storage for a definition.
class LDefinition {
  // Bits containing policy, type, and virtual register.
  uint32_t bits_;

  // Before register allocation, this optionally contains a fixed policy.
  // Register allocation assigns this field to a physical policy if none is
  // fixed.
  //
  // Right now, pre-allocated outputs are limited to the following:
  //   * Physical argument stack slots.
  //   * Physical registers.
  LAllocation output_;

  // Layout of bits_, low bits first: type, policy, virtual register.
  static const uint32_t TYPE_BITS = 4;
  static const uint32_t TYPE_SHIFT = 0;
  static const uint32_t TYPE_MASK = (1 << TYPE_BITS) - 1;
  static const uint32_t POLICY_BITS = 2;
  static const uint32_t POLICY_SHIFT = TYPE_SHIFT + TYPE_BITS;
  static const uint32_t POLICY_MASK = (1 << POLICY_BITS) - 1;

  static const uint32_t VREG_BITS =
      (sizeof(uint32_t) * 8) - (POLICY_BITS + TYPE_BITS);
  static const uint32_t VREG_SHIFT = POLICY_SHIFT + POLICY_BITS;
  static const uint32_t VREG_MASK = (1 << VREG_BITS) - 1;

 public:
  // Note that definitions, by default, are always allocated a register,
  // unless the policy specifies that an input can be re-used and that input
  // is a stack slot.
  enum Policy {
    // The policy is predetermined by the LAllocation attached to this
    // definition. The allocation may be:
    //   * A register, which may not appear as any fixed temporary.
    //   * A stack slot or argument.
    //
    // Register allocation will not modify a fixed allocation.
    FIXED,

    // A random register of an appropriate class will be assigned.
    REGISTER,

    // One definition per instruction must re-use the first input
    // allocation, which (for now) must be a register.
    MUST_REUSE_INPUT
  };

  // This should be kept in sync with LIR.cpp's TypeChars.
  enum Type {
    GENERAL,     // Generic, integer or pointer-width data (GPR).
    INT32,       // int32 data (GPR).
    OBJECT,      // Pointer that may be collected as garbage (GPR).
    SLOTS,       // Slots/elements pointer that may be moved by minor GCs (GPR).
    FLOAT32,     // 32-bit floating-point value (FPU).
    DOUBLE,      // 64-bit floating-point value (FPU).
    SIMD128INT,  // 128-bit SIMD integer vector (FPU).
    SIMD128FLOAT,  // 128-bit SIMD floating point vector (FPU).
    SINCOS,
#ifdef JS_NUNBOX32
    // A type virtual register must be followed by a payload virtual
    // register, as both will be tracked as a single gcthing.
    TYPE,
    PAYLOAD
#else
    BOX  // Joined box, for punbox systems. (GPR, gcthing)
#endif
  };

  // Packs the vreg index, policy and type into bits_.
  void set(uint32_t index, Type type, Policy policy) {
    JS_STATIC_ASSERT(MAX_VIRTUAL_REGISTERS <= VREG_MASK);
    bits_ =
        (index << VREG_SHIFT) | (policy << POLICY_SHIFT) | (type << TYPE_SHIFT);
    MOZ_ASSERT_IF(!SupportsSimd, !isSimdType());
  }

 public:
  LDefinition(uint32_t index, Type type, Policy policy = REGISTER) {
    set(index, type, policy);
  }

  explicit LDefinition(Type type, Policy policy = REGISTER) {
    set(0, type, policy);
  }

  // Fixed-policy constructors: the output allocation is predetermined.
  LDefinition(Type type, const LAllocation& a) : output_(a) {
    set(0, type, FIXED);
  }

  LDefinition(uint32_t index, Type type, const LAllocation& a) : output_(a) {
    set(index, type, FIXED);
  }

  // A default-constructed definition is a "bogus temp": FIXED policy with
  // a bogus (all-zero) output allocation.
  LDefinition() : bits_(0) { MOZ_ASSERT(isBogusTemp()); }

  static LDefinition BogusTemp() { return LDefinition(); }

  Policy policy() const {
    return (Policy)((bits_ >> POLICY_SHIFT) & POLICY_MASK);
  }
  Type type() const { return (Type)((bits_ >> TYPE_SHIFT) & TYPE_MASK); }
  bool isSimdType() const {
    return type() == SIMD128INT || type() == SIMD128FLOAT;
  }
  // Whether |r| belongs to the register class and width this definition's
  // type requires.
  bool isCompatibleReg(const AnyRegister& r) const {
    if (isFloatReg() && r.isFloat()) {
      if (type() == FLOAT32) return r.fpu().isSingle();
      if (type() == DOUBLE) return r.fpu().isDouble();
      if (isSimdType()) return r.fpu().isSimd128();
      MOZ_CRASH("Unexpected MDefinition type");
    }
    return !isFloatReg() && !r.isFloat();
  }
  bool isCompatibleDef(const LDefinition& other) const {
#if defined(JS_CODEGEN_ARM) || defined(JS_CODEGEN_MIPS32)
    // These targets require float defs to have the exact same type; other
    // targets only require the same register class.
    if (isFloatReg() && other.isFloatReg()) return type() == other.type();
    return !isFloatReg() && !other.isFloatReg();
#else
    return isFloatReg() == other.isFloatReg();
#endif
  }

  bool isFloatReg() const {
    return type() == FLOAT32 || type() == DOUBLE || isSimdType();
  }
  uint32_t virtualRegister() const {
    uint32_t index = (bits_ >> VREG_SHIFT) & VREG_MASK;
    // NOTE(review): assert disabled — unlike LUse::virtualRegister(), a
    // vreg of 0 is apparently tolerated here; confirm with callers.
    // MOZ_ASSERT(index != 0);
    return index;
  }
  LAllocation* output() { return &output_; }
  const LAllocation* output() const { return &output_; }
  bool isFixed() const { return policy() == FIXED; }
  bool isBogusTemp() const { return isFixed() && output()->isBogus(); }
  void setVirtualRegister(uint32_t index) {
    MOZ_ASSERT(index < VREG_MASK);
    bits_ &= ~(VREG_MASK << VREG_SHIFT);
    bits_ |= index << VREG_SHIFT;
  }
  // Installs a concrete output; any non-use allocation forces FIXED policy.
  void setOutput(const LAllocation& a) {
    output_ = a;
    if (!a.isUse()) {
      bits_ &= ~(POLICY_MASK << POLICY_SHIFT);
      bits_ |= FIXED << POLICY_SHIFT;
    }
  }
  // For MUST_REUSE_INPUT definitions, the reused operand index is stored
  // in output_ as a constant index.
  void setReusedInput(uint32_t operand) {
    output_ = LConstantIndex::FromIndex(operand);
  }
  uint32_t getReusedInput() const {
    MOZ_ASSERT(policy() == LDefinition::MUST_REUSE_INPUT);
    return output_.toConstantIndex()->index();
  }

  // Maps an MIRType to the LDefinition::Type used to store values of that
  // type; crashes on types that have no LIR storage class.
  static inline Type TypeFrom(MIRType type) {
    switch (type) {
      case MIRType::Boolean:
      case MIRType::Int32:
        // The stack slot allocator doesn't currently support allocating
        // 1-byte slots, so for now we lower MIRType::Boolean into INT32.
        static_assert(sizeof(bool) <= sizeof(int32_t),
                      "bool doesn't fit in an int32 slot");
        return LDefinition::INT32;
      case MIRType::String:
      case MIRType::Symbol:
      case MIRType::Object:
      case MIRType::ObjectOrNull:
        return LDefinition::OBJECT;
      case MIRType::Double:
        return LDefinition::DOUBLE;
      case MIRType::Float32:
        return LDefinition::FLOAT32;
#if defined(JS_PUNBOX64)
      case MIRType::Value:
        return LDefinition::BOX;
#endif
      case MIRType::SinCosDouble:
        return LDefinition::SINCOS;
      case MIRType::Slots:
      case MIRType::Elements:
        return LDefinition::SLOTS;
      case MIRType::Pointer:
        return LDefinition::GENERAL;
#if defined(JS_PUNBOX64)
      case MIRType::Int64:
        return LDefinition::GENERAL;
#endif
      case MIRType::Int8x16:
      case MIRType::Int16x8:
      case MIRType::Int32x4:
      case MIRType::Bool8x16:
      case MIRType::Bool16x8:
      case MIRType::Bool32x4:
        return LDefinition::SIMD128INT;
      case MIRType::Float32x4:
        return LDefinition::SIMD128FLOAT;
      default:
        MOZ_CRASH("unexpected type");
    }
  }

  UniqueChars toString() const;

  void dump() const;
};
557 
558 using LInt64Definition = LInt64Value<LDefinition>;
559 
560 // Forward declarations of LIR types.
561 #define LIROP(op) class L##op;
562 LIR_OPCODE_LIST(LIROP)
563 #undef LIROP
564 
565 class LSnapshot;
566 class LSafepoint;
567 class LInstruction;
568 class LElementVisitor;
569 
// The common base class for LPhi and LInstruction.
class LNode {
 protected:
  // MIR node this LIR node was lowered from; may be null.
  MDefinition* mir_;

 private:
  LBlock* block_;
  uint32_t id_;

 protected:
  // Bitfields below are all uint32_t to make sure MSVC packs them correctly.
  uint32_t op_ : 10;
  uint32_t isCall_ : 1;
  // LPhi::numOperands() may not fit in this bitfield, so we only use this
  // field for LInstruction.
  uint32_t nonPhiNumOperands_ : 6;
  // For LInstruction, the first operand is stored at offset
  // sizeof(LInstruction) + nonPhiOperandsOffset_ * sizeof(uintptr_t).
  uint32_t nonPhiOperandsOffset_ : 5;
  uint32_t numDefs_ : 4;
  uint32_t numTemps_ : 4;

 public:
  enum Opcode {
#define LIROP(name) LOp_##name,
    LIR_OPCODE_LIST(LIROP)
#undef LIROP
        LOp_Invalid
  };

  LNode(Opcode op, uint32_t nonPhiNumOperands, uint32_t numDefs,
        uint32_t numTemps)
      : mir_(nullptr),
        block_(nullptr),
        id_(0),
        op_(op),
        isCall_(false),
        nonPhiNumOperands_(nonPhiNumOperands),
        nonPhiOperandsOffset_(0),
        numDefs_(numDefs),
        numTemps_(numTemps) {
    MOZ_ASSERT(op_ < LOp_Invalid);
    // The bitfield initializers above silently truncate; assert nothing
    // was lost.
    MOZ_ASSERT(op_ == op, "opcode must fit in bitfield");
    MOZ_ASSERT(nonPhiNumOperands_ == nonPhiNumOperands,
               "nonPhiNumOperands must fit in bitfield");
    MOZ_ASSERT(numDefs_ == numDefs, "numDefs must fit in bitfield");
    MOZ_ASSERT(numTemps_ == numTemps, "numTemps must fit in bitfield");
  }

  // Human-readable opcode name, for printing and debugging.
  const char* opName() {
    switch (op()) {
#define LIR_NAME_INS(name) \
  case LOp_##name:         \
    return #name;
      LIR_OPCODE_LIST(LIR_NAME_INS)
#undef LIR_NAME_INS
      default:
        return "Invalid";
    }
  }

  // Hook for opcodes to add extra high level detail about what code will be
  // emitted for the op.
 private:
  const char* extraName() const { return nullptr; }

 public:
  const char* getExtraName() const;

  Opcode op() const { return Opcode(op_); }

  // Every LNode is either an LPhi or an LInstruction.
  bool isInstruction() const { return op() != LOp_Phi; }
  inline LInstruction* toInstruction();
  inline const LInstruction* toInstruction() const;

  // Returns the number of outputs of this instruction. If an output is
  // unallocated, it is an LDefinition, defining a virtual register.
  size_t numDefs() const { return numDefs_; }

  bool isCall() const { return isCall_; }

  // Does this call preserve the given register?
  // By default, it is assumed that all registers are clobbered by a call.
  inline bool isCallPreserved(AnyRegister reg) const;

  // Node id; assigned exactly once and must be nonzero (both asserted).
  uint32_t id() const { return id_; }
  void setId(uint32_t id) {
    MOZ_ASSERT(!id_);
    MOZ_ASSERT(id);
    id_ = id;
  }
  void setMir(MDefinition* mir) { mir_ = mir; }
  MDefinition* mirRaw() const {
    /* Untyped MIR for this op. Prefer mir() methods in subclasses. */
    return mir_;
  }
  LBlock* block() const { return block_; }
  void setBlock(LBlock* block) { block_ = block; }

  // For an instruction which has a MUST_REUSE_INPUT output, whether that
  // output register will be restored to its original value when bailing out.
  inline bool recoversInput() const;

  // Debug printing helpers (defined out of line).
  void dump(GenericPrinter& out);
  void dump();
  static void printName(GenericPrinter& out, Opcode op);
  void printName(GenericPrinter& out);
  void printOperands(GenericPrinter& out);

 public:
  // Opcode testing and casts.
#define LIROP(name)                                    \
  bool is##name() const { return op() == LOp_##name; } \
  inline L##name* to##name();                          \
  inline const L##name* to##name() const;
  LIR_OPCODE_LIST(LIROP)
#undef LIROP

// Declares the static opcode constant used by LIR subclasses.
#define LIR_HEADER(opcode) \
  static constexpr LNode::Opcode classOpcode = LNode::LOp_##opcode;
};
691 
// An LIR instruction: an LNode that carries a snapshot, safepoint, move
// groups, and out-of-line operand storage.
class LInstruction : public LNode,
                     public TempObject,
                     public InlineListNode<LInstruction> {
  // This snapshot could be set after a ResumePoint.  It is used to restart
  // from the resume point pc.
  LSnapshot* snapshot_;

  // Structure capturing the set of stack slots and registers which are known
  // to hold either gcthings or Values.
  LSafepoint* safepoint_;

  // Optional move groups attached to this instruction; see the accessors
  // below.
  LMoveGroup* inputMoves_;
  LMoveGroup* fixReuseMoves_;
  LMoveGroup* movesAfter_;

 protected:
  LInstruction(Opcode opcode, uint32_t numOperands, uint32_t numDefs,
               uint32_t numTemps)
      : LNode(opcode, numOperands, numDefs, numTemps),
        snapshot_(nullptr),
        safepoint_(nullptr),
        inputMoves_(nullptr),
        fixReuseMoves_(nullptr),
        movesAfter_(nullptr) {}

  void setIsCall() { isCall_ = true; }

 public:
  inline LDefinition* getDef(size_t index);

  void setDef(size_t index, const LDefinition& def) { *getDef(index) = def; }

  // Operands are stored out of line, nonPhiOperandsOffset_ words past the
  // end of this object (|this + 1|); see the LNode bitfield comment.
  LAllocation* getOperand(size_t index) const {
    MOZ_ASSERT(index < numOperands());
    MOZ_ASSERT(nonPhiOperandsOffset_ > 0);
    uintptr_t p = reinterpret_cast<uintptr_t>(this + 1) +
                  nonPhiOperandsOffset_ * sizeof(uintptr_t);
    return reinterpret_cast<LAllocation*>(p) + index;
  }
  void setOperand(size_t index, const LAllocation& a) {
    *getOperand(index) = a;
  }

  // Records where the operand array lives, as a byte offset from the start
  // of the object; stored as a word offset past sizeof(LInstruction).
  // May only be called once (asserted).
  void initOperandsOffset(size_t offset) {
    MOZ_ASSERT(nonPhiOperandsOffset_ == 0);
    MOZ_ASSERT(offset >= sizeof(LInstruction));
    MOZ_ASSERT(((offset - sizeof(LInstruction)) % sizeof(uintptr_t)) == 0);
    offset = (offset - sizeof(LInstruction)) / sizeof(uintptr_t);
    nonPhiOperandsOffset_ = offset;
    MOZ_ASSERT(nonPhiOperandsOffset_ == offset, "offset must fit in bitfield");
  }

  // Returns information about temporary registers needed. Each temporary
  // register is an LDefinition with a fixed or virtual register and
  // either GENERAL, FLOAT32, or DOUBLE type.
  size_t numTemps() const { return numTemps_; }
  inline LDefinition* getTemp(size_t index);

  LSnapshot* snapshot() const { return snapshot_; }
  LSafepoint* safepoint() const { return safepoint_; }
  LMoveGroup* inputMoves() const { return inputMoves_; }
  void setInputMoves(LMoveGroup* moves) { inputMoves_ = moves; }
  LMoveGroup* fixReuseMoves() const { return fixReuseMoves_; }
  void setFixReuseMoves(LMoveGroup* moves) { fixReuseMoves_ = moves; }
  LMoveGroup* movesAfter() const { return movesAfter_; }
  void setMovesAfter(LMoveGroup* moves) { movesAfter_ = moves; }
  uint32_t numOperands() const { return nonPhiNumOperands_; }
  void assignSnapshot(LSnapshot* snapshot);
  void initSafepoint(TempAllocator& alloc);

  class InputIterator;
};
764 
// Checked downcast: valid because every non-phi LNode is an LInstruction.
LInstruction* LNode::toInstruction() {
  MOZ_ASSERT(isInstruction());
  return static_cast<LInstruction*>(this);
}
769 
// Const overload of the checked LInstruction downcast above.
const LInstruction* LNode::toInstruction() const {
  MOZ_ASSERT(isInstruction());
  return static_cast<const LInstruction*>(this);
}
774 
// Base class for visitors over LIR nodes. Tracks the element currently
// being visited and the bytecode PCs recovered from its MIR node.
class LElementVisitor {
  LNode* ins_;

 protected:
  // Last tracked pc seen, and the last pc of the outermost (not-inlined)
  // script, updated as elements are visited.
  jsbytecode* lastPC_;
  jsbytecode* lastNotInlinedPC_;

  LNode* instruction() { return ins_; }

  // Records |ins| as the current element; if it has MIR attached, refreshes
  // the tracked PCs from that MIR node.
  void setElement(LNode* ins) {
    ins_ = ins;
    if (ins->mirRaw()) {
      lastPC_ = ins->mirRaw()->trackedPc();
      if (ins->mirRaw()->trackedTree())
        lastNotInlinedPC_ = ins->mirRaw()->profilerLeavePc();
    }
  }

  LElementVisitor()
      : ins_(nullptr), lastPC_(nullptr), lastNotInlinedPC_(nullptr) {}

  // Default visit methods crash; subclasses override the opcodes they
  // actually handle.
#define VISIT_INS(op) \
  void visit##op(L##op*) { MOZ_CRASH("NYI: " #op); }
  LIR_OPCODE_LIST(VISIT_INS)
#undef VISIT_INS
};
801 
802 typedef InlineList<LInstruction>::iterator LInstructionIterator;
803 typedef InlineList<LInstruction>::reverse_iterator LInstructionReverseIterator;
804 
805 class MPhi;
806 
// Phi is a pseudo-instruction that emits no code, and is an annotation for the
// register allocator. Like its equivalent in MIR, phis are collected at the
// top of blocks and are meant to be executed in parallel, choosing the input
// corresponding to the predecessor taken in the control flow graph.
class LPhi final : public LNode {
  // One input allocation per predecessor; the array is allocated by the
  // caller and passed in.
  LAllocation* const inputs_;
  LDefinition def_;

 public:
  LIR_HEADER(Phi)

  LPhi(MPhi* ins, LAllocation* inputs)
      : LNode(classOpcode,
              /* nonPhiNumOperands = */ 0,
              /* numDefs = */ 1,
              /* numTemps = */ 0),
        inputs_(inputs) {
    setMir(ins);
  }

  // Phis define exactly one value (index must be 0).
  LDefinition* getDef(size_t index) {
    MOZ_ASSERT(index == 0);
    return &def_;
  }
  void setDef(size_t index, const LDefinition& def) {
    MOZ_ASSERT(index == 0);
    def_ = def;
  }
  // The operand count mirrors the underlying MPhi's operand count.
  size_t numOperands() const { return mir_->toPhi()->numOperands(); }
  LAllocation* getOperand(size_t index) {
    MOZ_ASSERT(index < numOperands());
    return &inputs_[index];
  }
  void setOperand(size_t index, const LAllocation& a) {
    MOZ_ASSERT(index < numOperands());
    inputs_[index] = a;
  }

  // Phis don't have temps, so calling numTemps/getTemp is pointless.
  size_t numTemps() const = delete;
  LDefinition* getTemp(size_t index) = delete;
};
849 
class LMoveGroup;

// The LIR counterpart of an MBasicBlock: a list of phis followed by a list of
// instructions, plus the move groups the register allocator inserts on entry
// and exit, and the label used to branch to this block.
class LBlock {
  MBasicBlock* block_;
  FixedList<LPhi> phis_;
  InlineList<LInstruction> instructions_;
  LMoveGroup* entryMoveGroup_;
  LMoveGroup* exitMoveGroup_;
  Label label_;

 public:
  explicit LBlock(MBasicBlock* block);
  // Allocates the phi list; must be called before the block is used.
  MOZ_MUST_USE bool init(TempAllocator& alloc);

  // Appends |ins| and records this block as its owner.
  void add(LInstruction* ins) {
    ins->setBlock(this);
    instructions_.pushBack(ins);
  }
  size_t numPhis() const { return phis_.length(); }
  LPhi* getPhi(size_t index) { return &phis_[index]; }
  const LPhi* getPhi(size_t index) const { return &phis_[index]; }
  MBasicBlock* mir() const { return block_; }
  LInstructionIterator begin() { return instructions_.begin(); }
  LInstructionIterator begin(LInstruction* at) {
    return instructions_.begin(at);
  }
  LInstructionIterator end() { return instructions_.end(); }
  LInstructionReverseIterator rbegin() { return instructions_.rbegin(); }
  LInstructionReverseIterator rbegin(LInstruction* at) {
    return instructions_.rbegin(at);
  }
  LInstructionReverseIterator rend() { return instructions_.rend(); }
  InlineList<LInstruction>& instructions() { return instructions_; }
  void insertAfter(LInstruction* at, LInstruction* ins) {
    instructions_.insertAfter(at, ins);
  }
  void insertBefore(LInstruction* at, LInstruction* ins) {
    instructions_.insertBefore(at, ins);
  }
  // First node carrying an allocator-assigned id: the first phi if any,
  // otherwise the first instruction with an id.
  const LNode* firstElementWithId() const {
    return !phis_.empty() ? static_cast<const LNode*>(getPhi(0))
                          : firstInstructionWithId();
  }
  uint32_t firstId() const { return firstElementWithId()->id(); }
  uint32_t lastId() const { return lastInstructionWithId()->id(); }
  const LInstruction* firstInstructionWithId() const;
  const LInstruction* lastInstructionWithId() const {
    const LInstruction* last = *instructions_.rbegin();
    MOZ_ASSERT(last->id());
    // The last instruction is a control flow instruction which does not have
    // any output.
    MOZ_ASSERT(last->numDefs() == 0);
    return last;
  }

  // Return the label to branch to when branching to this block.
  Label* label() {
    // Trivial blocks emit no code, so branching to them is a logic error.
    MOZ_ASSERT(!isTrivial());
    return &label_;
  }

  LMoveGroup* getEntryMoveGroup(TempAllocator& alloc);
  LMoveGroup* getExitMoveGroup(TempAllocator& alloc);

  // Test whether this basic block is empty except for a simple goto, and
  // is not a loop header. No code will be emitted for such blocks.
  bool isTrivial() { return begin()->isGoto() && !mir()->isLoopHeader(); }

  void dump(GenericPrinter& out);
  void dump();
};
920 
namespace details {
// Base helper for instructions with a compile-time-fixed number of defs and
// temps. Defs and temps share one contiguous array (defs first, then temps)
// so that LInstruction::getDef/getTemp can locate them by a fixed offset
// regardless of the concrete instruction type — see offsetOfDef/offsetOfTemp.
template <size_t Defs, size_t Temps>
class LInstructionFixedDefsTempsHelper : public LInstruction {
  mozilla::Array<LDefinition, Defs + Temps> defsAndTemps_;

 protected:
  LInstructionFixedDefsTempsHelper(Opcode opcode, uint32_t numOperands)
      : LInstruction(opcode, numOperands, Defs, Temps) {}

 public:
  // Override the methods in LInstruction with more optimized versions
  // for when we know the exact instruction type.
  LDefinition* getDef(size_t index) {
    MOZ_ASSERT(index < Defs);
    return &defsAndTemps_[index];
  }
  LDefinition* getTemp(size_t index) {
    MOZ_ASSERT(index < Temps);
    return &defsAndTemps_[Defs + index];
  }

  void setDef(size_t index, const LDefinition& def) {
    MOZ_ASSERT(index < Defs);
    defsAndTemps_[index] = def;
  }
  void setTemp(size_t index, const LDefinition& a) {
    MOZ_ASSERT(index < Temps);
    defsAndTemps_[Defs + index] = a;
  }
  // An int64 temp occupies two adjacent temp slots on 32-bit platforms
  // (low then high), one slot on 64-bit platforms.
  void setInt64Temp(size_t index, const LInt64Definition& a) {
#if JS_BITS_PER_WORD == 32
    setTemp(index, a.low());
    setTemp(index + 1, a.high());
#else
    setTemp(index, a.value());
#endif
  }

  // Default accessors, assuming a single input and output, respectively.
  const LAllocation* input() {
    MOZ_ASSERT(numOperands() == 1);
    return getOperand(0);
  }
  const LDefinition* output() {
    MOZ_ASSERT(numDefs() == 1);
    return getDef(0);
  }
  // The offset of defsAndTemps_ is independent of the template parameters,
  // so the <0, 0> instantiation stands in for all of them. This is what lets
  // the type-erased LInstruction::getDef/getTemp work.
  static size_t offsetOfDef(size_t index) {
    using T = LInstructionFixedDefsTempsHelper<0, 0>;
    return offsetof(T, defsAndTemps_) + index * sizeof(LDefinition);
  }
  static size_t offsetOfTemp(uint32_t numDefs, uint32_t index) {
    using T = LInstructionFixedDefsTempsHelper<0, 0>;
    return offsetof(T, defsAndTemps_) + (numDefs + index) * sizeof(LDefinition);
  }
};
}  // namespace details
978 
// Type-erased def accessor: every concrete instruction derives from
// LInstructionFixedDefsTempsHelper, whose defsAndTemps_ array sits at a fixed
// offset independent of the template parameters, so we can compute the slot
// address without knowing the concrete type.
inline LDefinition* LInstruction::getDef(size_t index) {
  MOZ_ASSERT(index < numDefs());
  using T = details::LInstructionFixedDefsTempsHelper<0, 0>;
  uint8_t* p = reinterpret_cast<uint8_t*>(this) + T::offsetOfDef(index);
  return reinterpret_cast<LDefinition*>(p);
}
985 
// Type-erased temp accessor; see getDef above. Temps are stored after the
// defs in the shared defsAndTemps_ array, hence the numDefs() bias.
inline LDefinition* LInstruction::getTemp(size_t index) {
  MOZ_ASSERT(index < numTemps());
  using T = details::LInstructionFixedDefsTempsHelper<0, 0>;
  uint8_t* p =
      reinterpret_cast<uint8_t*>(this) + T::offsetOfTemp(numDefs(), index);
  return reinterpret_cast<LDefinition*>(p);
}
993 
// Helper for instructions with a compile-time-fixed number of defs, operands
// and temps. Operands live in an inline array whose offset is registered with
// the base class so the generic operand accessors can find them.
template <size_t Defs, size_t Operands, size_t Temps>
class LInstructionHelper
    : public details::LInstructionFixedDefsTempsHelper<Defs, Temps> {
  mozilla::Array<LAllocation, Operands> operands_;

 protected:
  explicit LInstructionHelper(LNode::Opcode opcode)
      : details::LInstructionFixedDefsTempsHelper<Defs, Temps>(opcode,
                                                               Operands) {
    static_assert(
        Operands == 0 || sizeof(operands_) == Operands * sizeof(LAllocation),
        "mozilla::Array should not contain other fields");
    if (Operands > 0) {
      using T = LInstructionHelper<Defs, Operands, Temps>;
      this->initOperandsOffset(offsetof(T, operands_));
    }
  }

 public:
  // Override the methods in LInstruction with more optimized versions
  // for when we know the exact instruction type.
  LAllocation* getOperand(size_t index) { return &operands_[index]; }
  void setOperand(size_t index, const LAllocation& a) { operands_[index] = a; }
  // A boxed Value occupies BOX_PIECES consecutive operand slots: type tag and
  // payload on NUNBOX32, a single slot on PUNBOX64.
  void setBoxOperand(size_t index, const LBoxAllocation& alloc) {
#ifdef JS_NUNBOX32
    operands_[index + TYPE_INDEX] = alloc.type();
    operands_[index + PAYLOAD_INDEX] = alloc.payload();
#else
    operands_[index] = alloc.value();
#endif
  }
  // An int64 occupies INT64_PIECES consecutive operand slots: low/high halves
  // on 32-bit platforms, a single slot on 64-bit platforms.
  void setInt64Operand(size_t index, const LInt64Allocation& alloc) {
#if JS_BITS_PER_WORD == 32
    operands_[index + INT64LOW_INDEX] = alloc.low();
    operands_[index + INT64HIGH_INDEX] = alloc.high();
#else
    operands_[index] = alloc.value();
#endif
  }
  const LInt64Allocation getInt64Operand(size_t offset) {
#if JS_BITS_PER_WORD == 32
    return LInt64Allocation(operands_[offset + INT64HIGH_INDEX],
                            operands_[offset + INT64LOW_INDEX]);
#else
    return LInt64Allocation(operands_[offset]);
#endif
  }
};
1042 
// Helper for instructions whose operand count is only known at runtime; the
// operand storage is managed by the LInstruction base rather than inline.
template <size_t Defs, size_t Temps>
class LVariadicInstruction
    : public details::LInstructionFixedDefsTempsHelper<Defs, Temps> {
 protected:
  LVariadicInstruction(LNode::Opcode opcode, size_t numOperands)
      : details::LInstructionFixedDefsTempsHelper<Defs, Temps>(opcode,
                                                               numOperands) {}

 public:
  // A boxed Value occupies BOX_PIECES consecutive operand slots; see
  // LInstructionHelper::setBoxOperand.
  void setBoxOperand(size_t index, const LBoxAllocation& a) {
#ifdef JS_NUNBOX32
    this->setOperand(index + TYPE_INDEX, a.type());
    this->setOperand(index + PAYLOAD_INDEX, a.payload());
#else
    this->setOperand(index, a.value());
#endif
  }
};
1061 
// Like LInstructionHelper, but marks the instruction as a call, which affects
// register allocation (see the liveRegs_ comment on LSafepoint).
template <size_t Defs, size_t Operands, size_t Temps>
class LCallInstructionHelper
    : public LInstructionHelper<Defs, Operands, Temps> {
 protected:
  explicit LCallInstructionHelper(LNode::Opcode opcode)
      : LInstructionHelper<Defs, Operands, Temps>(opcode) {
    this->setIsCall();
  }
};
1071 
// Call instruction with exactly two operands, exposed as lhs/rhs.
template <size_t Defs, size_t Temps>
class LBinaryCallInstructionHelper
    : public LCallInstructionHelper<Defs, 2, Temps> {
 protected:
  explicit LBinaryCallInstructionHelper(LNode::Opcode opcode)
      : LCallInstructionHelper<Defs, 2, Temps>(opcode) {}

 public:
  const LAllocation* lhs() { return this->getOperand(0); }
  const LAllocation* rhs() { return this->getOperand(1); }
};
1083 
// Describes how to recover (rebuild) the stack frames for a bailout: a list
// of MIR nodes, outer frames first, ending with the innermost resume point.
class LRecoverInfo : public TempObject {
 public:
  typedef Vector<MNode*, 2, JitAllocPolicy> Instructions;

 private:
  // List of instructions needed to recover the stack frames.
  // Outer frames are stored before inner frames.
  Instructions instructions_;

  // Cached offset where this resume point is encoded.
  RecoverOffset recoverOffset_;

  explicit LRecoverInfo(TempAllocator& alloc);
  MOZ_MUST_USE bool init(MResumePoint* mir);

  // Fill the instruction vector such as all instructions needed for the
  // recovery are pushed before the current instruction.
  template <typename Node>
  MOZ_MUST_USE bool appendOperands(Node* ins);
  MOZ_MUST_USE bool appendDefinition(MDefinition* def);
  MOZ_MUST_USE bool appendResumePoint(MResumePoint* rp);

 public:
  static LRecoverInfo* New(MIRGenerator* gen, MResumePoint* mir);

  // Resume point of the inner most function.
  MResumePoint* mir() const { return instructions_.back()->toResumePoint(); }
  RecoverOffset recoverOffset() const { return recoverOffset_; }
  // The offset may be set exactly once, after encoding.
  void setRecoverOffset(RecoverOffset offset) {
    MOZ_ASSERT(recoverOffset_ == INVALID_RECOVER_OFFSET);
    recoverOffset_ = offset;
  }

  MNode** begin() { return instructions_.begin(); }
  MNode** end() { return instructions_.end(); }
  size_t numInstructions() const { return instructions_.length(); }

  // Iterates over every operand of every instruction in the recover info.
  // NOTE: operator bool() below is inverted relative to the usual idiom — it
  // returns true when iteration is COMPLETE. Loops are written as
  // |for (OperandIter it(ri); !it; ++it)|.
  class OperandIter {
   private:
    MNode** it_;
    MNode** end_;
    size_t op_;
    size_t opEnd_;
    MResumePoint* rp_;
    MNode* node_;

   public:
    explicit OperandIter(LRecoverInfo* recoverInfo)
        : it_(recoverInfo->begin()),
          end_(recoverInfo->end()),
          op_(0),
          opEnd_(0),
          rp_(nullptr),
          node_(nullptr) {
      settle();
    }

    // Skips zero-operand instructions and caches the current node. Relies on
    // the list ending with a resume point (which has operands), so the loop
    // cannot run off the end.
    void settle() {
      opEnd_ = (*it_)->numOperands();
      while (opEnd_ == 0) {
        ++it_;
        op_ = 0;
        opEnd_ = (*it_)->numOperands();
      }
      node_ = *it_;
      if (node_->isResumePoint()) rp_ = node_->toResumePoint();
    }

    MDefinition* operator*() {
      if (rp_)  // de-virtualize MResumePoint::getOperand calls.
        return rp_->getOperand(op_);
      return node_->getOperand(op_);
    }
    MDefinition* operator->() {
      if (rp_)  // de-virtualize MResumePoint::getOperand calls.
        return rp_->getOperand(op_);
      return node_->getOperand(op_);
    }

    OperandIter& operator++() {
      ++op_;
      if (op_ != opEnd_) return *this;
      op_ = 0;
      ++it_;
      node_ = rp_ = nullptr;
      // !*this means "not at end" here; only settle if more nodes remain.
      if (!*this) settle();
      return *this;
    }

    // True when iteration is complete (see the class comment above).
    explicit operator bool() const { return it_ == end_; }

#ifdef DEBUG
    bool canOptimizeOutIfUnused();
#endif
  };
};
1180 
// An LSnapshot is the reflection of an MResumePoint in LIR. Unlike
// MResumePoints, they cannot be shared, as they are filled in by the register
// allocator in order to capture the precise low-level stack state in between an
// instruction's input and output. During code generation, LSnapshots are
// compressed and saved in the compiled script.
class LSnapshot : public TempObject {
 private:
  // Total number of entries; each boxed slot uses BOX_PIECES entries.
  uint32_t numSlots_;
  LAllocation* slots_;
  LRecoverInfo* recoverInfo_;
  SnapshotOffset snapshotOffset_;
  BailoutId bailoutId_;
  BailoutKind bailoutKind_;

  LSnapshot(LRecoverInfo* recover, BailoutKind kind);
  MOZ_MUST_USE bool init(MIRGenerator* gen);

 public:
  static LSnapshot* New(MIRGenerator* gen, LRecoverInfo* recover,
                        BailoutKind kind);

  // Raw entry count vs. logical (boxed) slot count.
  size_t numEntries() const { return numSlots_; }
  size_t numSlots() const { return numSlots_ / BOX_PIECES; }
  // The payload is the last of the BOX_PIECES entries for slot i.
  LAllocation* payloadOfSlot(size_t i) {
    MOZ_ASSERT(i < numSlots());
    size_t entryIndex = (i * BOX_PIECES) + (BOX_PIECES - 1);
    return getEntry(entryIndex);
  }
#ifdef JS_NUNBOX32
  // The type tag precedes the payload within a slot's entries.
  LAllocation* typeOfSlot(size_t i) {
    MOZ_ASSERT(i < numSlots());
    size_t entryIndex = (i * BOX_PIECES) + (BOX_PIECES - 2);
    return getEntry(entryIndex);
  }
#endif
  LAllocation* getEntry(size_t i) {
    MOZ_ASSERT(i < numSlots_);
    return &slots_[i];
  }
  void setEntry(size_t i, const LAllocation& alloc) {
    MOZ_ASSERT(i < numSlots_);
    slots_[i] = alloc;
  }
  LRecoverInfo* recoverInfo() const { return recoverInfo_; }
  MResumePoint* mir() const { return recoverInfo()->mir(); }
  SnapshotOffset snapshotOffset() const { return snapshotOffset_; }
  BailoutId bailoutId() const { return bailoutId_; }
  // Both offsets may be set exactly once, after encoding.
  void setSnapshotOffset(SnapshotOffset offset) {
    MOZ_ASSERT(snapshotOffset_ == INVALID_SNAPSHOT_OFFSET);
    snapshotOffset_ = offset;
  }
  void setBailoutId(BailoutId id) {
    MOZ_ASSERT(bailoutId_ == INVALID_BAILOUT_ID);
    bailoutId_ = id;
  }
  BailoutKind bailoutKind() const { return bailoutKind_; }
  void rewriteRecoveredInput(LUse input);
};
1239 
1240 struct SafepointSlotEntry {
1241   // Flag indicating whether this is a slot in the stack or argument space.
1242   uint32_t stack : 1;
1243 
1244   // Byte offset of the slot, as in LStackSlot or LArgument.
1245   uint32_t slot : 31;
1246 
SafepointSlotEntrySafepointSlotEntry1247   SafepointSlotEntry() {}
SafepointSlotEntrySafepointSlotEntry1248   SafepointSlotEntry(bool stack, uint32_t slot) : stack(stack), slot(slot) {}
SafepointSlotEntrySafepointSlotEntry1249   explicit SafepointSlotEntry(const LAllocation* a)
1250       : stack(a->isStackSlot()), slot(a->memorySlot()) {}
1251 };
1252 
1253 struct SafepointNunboxEntry {
1254   uint32_t typeVreg;
1255   LAllocation type;
1256   LAllocation payload;
1257 
SafepointNunboxEntrySafepointNunboxEntry1258   SafepointNunboxEntry() {}
SafepointNunboxEntrySafepointNunboxEntry1259   SafepointNunboxEntry(uint32_t typeVreg, LAllocation type, LAllocation payload)
1260       : typeVreg(typeVreg), type(type), payload(payload) {}
1261 };
1262 
class LSafepoint : public TempObject {
  typedef SafepointSlotEntry SlotEntry;
  typedef SafepointNunboxEntry NunboxEntry;

 public:
  typedef Vector<SlotEntry, 0, JitAllocPolicy> SlotList;
  typedef Vector<NunboxEntry, 0, JitAllocPolicy> NunboxList;

 private:
  // The information in a safepoint describes the registers and gc related
  // values that are live at the start of the associated instruction.

  // The set of registers which are live at an OOL call made within the
  // instruction. This includes any registers for inputs which are not
  // use-at-start, any registers for temps, and any registers live after the
  // call except outputs of the instruction.
  //
  // For call instructions, the live regs are empty. Call instructions may
  // have register inputs or temporaries, which will *not* be in the live
  // registers: if passed to the call, the values passed will be marked via
  // MarkJitExitFrame, and no registers can be live after the instruction
  // except its outputs.
  LiveRegisterSet liveRegs_;

  // The subset of liveRegs which contains gcthing pointers.
  LiveGeneralRegisterSet gcRegs_;

#ifdef CHECK_OSIPOINT_REGISTERS
  // Clobbered regs of the current instruction. This set is never written to
  // the safepoint; it's only used by assertions during compilation.
  LiveRegisterSet clobberedRegs_;
#endif

  // Offset to a position in the safepoint stream, or
  // INVALID_SAFEPOINT_OFFSET.
  uint32_t safepointOffset_;

  // Assembler buffer displacement to OSI point's call location.
  uint32_t osiCallPointOffset_;

  // List of slots which have gcthing pointers.
  SlotList gcSlots_;

  // List of slots which have Values.
  SlotList valueSlots_;

#ifdef JS_NUNBOX32
  // List of registers (in liveRegs) and slots which contain pieces of Values.
  NunboxList nunboxParts_;
#elif JS_PUNBOX64
  // The subset of liveRegs which have Values.
  LiveGeneralRegisterSet valueRegs_;
#endif

  // The subset of liveRegs which contains pointers to slots/elements.
  LiveGeneralRegisterSet slotsOrElementsRegs_;

  // List of slots which have slots/elements pointers.
  SlotList slotsOrElementsSlots_;

 public:
  // Debug-checks that the specialized register subsets stay contained in
  // liveRegs. Called after every mutation below.
  void assertInvariants() {
  // Every register in valueRegs and gcRegs should also be in liveRegs.
#ifndef JS_NUNBOX32
    MOZ_ASSERT((valueRegs().bits() & ~liveRegs().gprs().bits()) == 0);
#endif
    MOZ_ASSERT((gcRegs().bits() & ~liveRegs().gprs().bits()) == 0);
  }

  explicit LSafepoint(TempAllocator& alloc)
      : safepointOffset_(INVALID_SAFEPOINT_OFFSET),
        osiCallPointOffset_(0),
        gcSlots_(alloc),
        valueSlots_(alloc)
#ifdef JS_NUNBOX32
        ,
        nunboxParts_(alloc)
#endif
        ,
        slotsOrElementsSlots_(alloc) {
    assertInvariants();
  }
  void addLiveRegister(AnyRegister reg) {
    liveRegs_.addUnchecked(reg);
    assertInvariants();
  }
  const LiveRegisterSet& liveRegs() const { return liveRegs_; }
#ifdef CHECK_OSIPOINT_REGISTERS
  void addClobberedRegister(AnyRegister reg) {
    clobberedRegs_.addUnchecked(reg);
    assertInvariants();
  }
  const LiveRegisterSet& clobberedRegs() const { return clobberedRegs_; }
#endif
  void addGcRegister(Register reg) {
    gcRegs_.addUnchecked(reg);
    assertInvariants();
  }
  LiveGeneralRegisterSet gcRegs() const { return gcRegs_; }
  // All add*Slot helpers return false on OOM (Vector append failure).
  MOZ_MUST_USE bool addGcSlot(bool stack, uint32_t slot) {
    bool result = gcSlots_.append(SlotEntry(stack, slot));
    if (result) assertInvariants();
    return result;
  }
  SlotList& gcSlots() { return gcSlots_; }

  SlotList& slotsOrElementsSlots() { return slotsOrElementsSlots_; }
  LiveGeneralRegisterSet slotsOrElementsRegs() const {
    return slotsOrElementsRegs_;
  }
  void addSlotsOrElementsRegister(Register reg) {
    slotsOrElementsRegs_.addUnchecked(reg);
    assertInvariants();
  }
  MOZ_MUST_USE bool addSlotsOrElementsSlot(bool stack, uint32_t slot) {
    bool result = slotsOrElementsSlots_.append(SlotEntry(stack, slot));
    if (result) assertInvariants();
    return result;
  }
  // Dispatches on the allocation kind: memory slot vs. gpr.
  MOZ_MUST_USE bool addSlotsOrElementsPointer(LAllocation alloc) {
    if (alloc.isMemory())
      return addSlotsOrElementsSlot(alloc.isStackSlot(), alloc.memorySlot());
    MOZ_ASSERT(alloc.isRegister());
    addSlotsOrElementsRegister(alloc.toRegister().gpr());
    assertInvariants();
    return true;
  }
  bool hasSlotsOrElementsPointer(LAllocation alloc) const {
    if (alloc.isRegister())
      return slotsOrElementsRegs().has(alloc.toRegister().gpr());
    // Linear scan; safepoint slot lists are expected to be small.
    for (size_t i = 0; i < slotsOrElementsSlots_.length(); i++) {
      const SlotEntry& entry = slotsOrElementsSlots_[i];
      if (entry.stack == alloc.isStackSlot() &&
          entry.slot == alloc.memorySlot())
        return true;
    }
    return false;
  }

  // Note: allocations that are neither memory nor register (e.g. constants)
  // are deliberately ignored and reported as success.
  MOZ_MUST_USE bool addGcPointer(LAllocation alloc) {
    if (alloc.isMemory())
      return addGcSlot(alloc.isStackSlot(), alloc.memorySlot());
    if (alloc.isRegister()) addGcRegister(alloc.toRegister().gpr());
    assertInvariants();
    return true;
  }

  bool hasGcPointer(LAllocation alloc) const {
    if (alloc.isRegister()) return gcRegs().has(alloc.toRegister().gpr());
    MOZ_ASSERT(alloc.isMemory());
    for (size_t i = 0; i < gcSlots_.length(); i++) {
      if (gcSlots_[i].stack == alloc.isStackSlot() &&
          gcSlots_[i].slot == alloc.memorySlot())
        return true;
    }
    return false;
  }

  MOZ_MUST_USE bool addValueSlot(bool stack, uint32_t slot) {
    bool result = valueSlots_.append(SlotEntry(stack, slot));
    if (result) assertInvariants();
    return result;
  }
  SlotList& valueSlots() { return valueSlots_; }

  bool hasValueSlot(bool stack, uint32_t slot) const {
    for (size_t i = 0; i < valueSlots_.length(); i++) {
      if (valueSlots_[i].stack == stack && valueSlots_[i].slot == slot)
        return true;
    }
    return false;
  }

#ifdef JS_NUNBOX32

  MOZ_MUST_USE bool addNunboxParts(uint32_t typeVreg, LAllocation type,
                                   LAllocation payload) {
    bool result = nunboxParts_.append(NunboxEntry(typeVreg, type, payload));
    if (result) assertInvariants();
    return result;
  }

  // Records the type half of a boxed value. If a partial entry for the same
  // vreg already exists (added via addNunboxPayload with a placeholder LUse
  // for the type), complete it; otherwise append a new partial entry with a
  // placeholder payload.
  MOZ_MUST_USE bool addNunboxType(uint32_t typeVreg, LAllocation type) {
    for (size_t i = 0; i < nunboxParts_.length(); i++) {
      if (nunboxParts_[i].type == type) return true;
      if (nunboxParts_[i].type == LUse(typeVreg, LUse::ANY)) {
        nunboxParts_[i].type = type;
        return true;
      }
    }

    // vregs for nunbox pairs are adjacent, with the type coming first.
    uint32_t payloadVreg = typeVreg + 1;
    bool result = nunboxParts_.append(
        NunboxEntry(typeVreg, type, LUse(payloadVreg, LUse::ANY)));
    if (result) assertInvariants();
    return result;
  }

  // Mirror of addNunboxType for the payload half.
  MOZ_MUST_USE bool addNunboxPayload(uint32_t payloadVreg,
                                     LAllocation payload) {
    for (size_t i = 0; i < nunboxParts_.length(); i++) {
      if (nunboxParts_[i].payload == payload) return true;
      if (nunboxParts_[i].payload == LUse(payloadVreg, LUse::ANY)) {
        nunboxParts_[i].payload = payload;
        return true;
      }
    }

    // vregs for nunbox pairs are adjacent, with the type coming first.
    uint32_t typeVreg = payloadVreg - 1;
    bool result = nunboxParts_.append(
        NunboxEntry(typeVreg, LUse(typeVreg, LUse::ANY), payload));
    if (result) assertInvariants();
    return result;
  }

  LAllocation findTypeAllocation(uint32_t typeVreg) {
    // Look for some allocation for the specified type vreg, to go with a
    // partial nunbox entry for the payload. Note that we don't need to
    // look at the value slots in the safepoint, as these aren't used by
    // register allocators which add partial nunbox entries.
    for (size_t i = 0; i < nunboxParts_.length(); i++) {
      if (nunboxParts_[i].typeVreg == typeVreg && !nunboxParts_[i].type.isUse())
        return nunboxParts_[i].type;
    }
    // No concrete allocation found; fall back to a placeholder use.
    return LUse(typeVreg, LUse::ANY);
  }

#ifdef DEBUG
  bool hasNunboxPayload(LAllocation payload) const {
    // A payload stored in a slot tracked as a whole Value also counts.
    if (payload.isMemory() &&
        hasValueSlot(payload.isStackSlot(), payload.memorySlot()))
      return true;
    for (size_t i = 0; i < nunboxParts_.length(); i++) {
      if (nunboxParts_[i].payload == payload) return true;
    }
    return false;
  }
#endif

  NunboxList& nunboxParts() { return nunboxParts_; }

#elif JS_PUNBOX64

  void addValueRegister(Register reg) {
    valueRegs_.add(reg);
    assertInvariants();
  }
  LiveGeneralRegisterSet valueRegs() const { return valueRegs_; }

  // Record a boxed Value wherever it lives; false only on OOM.
  MOZ_MUST_USE bool addBoxedValue(LAllocation alloc) {
    if (alloc.isRegister()) {
      Register reg = alloc.toRegister().gpr();
      if (!valueRegs().has(reg)) addValueRegister(reg);
      return true;
    }
    if (hasValueSlot(alloc.isStackSlot(), alloc.memorySlot())) return true;
    return addValueSlot(alloc.isStackSlot(), alloc.memorySlot());
  }

  bool hasBoxedValue(LAllocation alloc) const {
    if (alloc.isRegister()) return valueRegs().has(alloc.toRegister().gpr());
    return hasValueSlot(alloc.isStackSlot(), alloc.memorySlot());
  }

#endif  // JS_PUNBOX64

  bool encoded() const { return safepointOffset_ != INVALID_SAFEPOINT_OFFSET; }
  uint32_t offset() const {
    MOZ_ASSERT(encoded());
    return safepointOffset_;
  }
  void setOffset(uint32_t offset) { safepointOffset_ = offset; }
  uint32_t osiReturnPointOffset() const {
    // In general, pointer arithmetic on code is bad, but in this case,
    // getting the return address from a call instruction, stepping over pools
    // would be wrong.
    return osiCallPointOffset_ + Assembler::PatchWrite_NearCallSize();
  }
  uint32_t osiCallPointOffset() const { return osiCallPointOffset_; }
  // May be set exactly once (0 means "not yet set").
  void setOsiCallPointOffset(uint32_t osiCallPointOffset) {
    MOZ_ASSERT(!osiCallPointOffset_);
    osiCallPointOffset_ = osiCallPointOffset;
  }
};
1549 
// Iterates all inputs of an instruction in two phases: first the operands,
// then (if present) the entries of the instruction's snapshot. The snapshot_
// flag records which phase we are in; idx_ is reset to 0 at the transition.
class LInstruction::InputIterator {
 private:
  LInstruction& ins_;
  size_t idx_;
  bool snapshot_;

  void handleOperandsEnd() {
    // Iterate on the snapshot when iteration over all operands is done.
    if (!snapshot_ && idx_ == ins_.numOperands() && ins_.snapshot()) {
      idx_ = 0;
      snapshot_ = true;
    }
  }

 public:
  explicit InputIterator(LInstruction& ins)
      : ins_(ins), idx_(0), snapshot_(false) {
    // Handle instructions with zero operands but a snapshot.
    handleOperandsEnd();
  }

  bool more() const {
    if (snapshot_) return idx_ < ins_.snapshot()->numEntries();
    if (idx_ < ins_.numOperands()) return true;
    if (ins_.snapshot() && ins_.snapshot()->numEntries()) return true;
    return false;
  }

  // True while iterating snapshot entries (phase two).
  bool isSnapshotInput() const { return snapshot_; }

  void next() {
    MOZ_ASSERT(more());
    idx_++;
    handleOperandsEnd();
  }

  // Overwrites the current input, whichever phase it belongs to.
  void replace(const LAllocation& alloc) {
    if (snapshot_)
      ins_.snapshot()->setEntry(idx_, alloc);
    else
      ins_.setOperand(idx_, alloc);
  }

  LAllocation* operator*() const {
    if (snapshot_) return ins_.snapshot()->getEntry(idx_);
    return ins_.getOperand(idx_);
  }

  LAllocation* operator->() const { return **this; }
};
1599 
// The lowered (LIR) form of a script: a FixedList of per-MIR-block LBlocks
// plus graph-wide bookkeeping — a deduplicated constant pool, safepoint
// lists, and counters for virtual registers, instruction ids, and stack
// slots.
class LIRGraph {
  // Hash policy keying the constant pool map on a Value's raw bits.
  struct ValueHasher {
    typedef Value Lookup;
    static HashNumber hash(const Value& v) { return HashNumber(v.asRawBits()); }
    static bool match(const Value& lhs, const Value& rhs) { return lhs == rhs; }
  };

  FixedList<LBlock> blocks_;
  // Pool of Value constants referenced from LIR; constantPoolMap_ maps a
  // Value to its index in constantPool_ so duplicates share one entry.
  Vector<Value, 0, JitAllocPolicy> constantPool_;
  typedef HashMap<Value, uint32_t, ValueHasher, JitAllocPolicy> ConstantPoolMap;
  ConstantPoolMap constantPoolMap_;
  Vector<LInstruction*, 0, JitAllocPolicy> safepoints_;
  Vector<LInstruction*, 0, JitAllocPolicy> nonCallSafepoints_;
  uint32_t numVirtualRegisters_;
  uint32_t numInstructions_;

  // Number of stack slots needed for local spills.
  uint32_t localSlotCount_;
  // Number of stack slots needed for argument construction for calls.
  uint32_t argumentSlotCount_;

  // Snapshot taken before any LIR has been lowered.
  LSnapshot* entrySnapshot_;

  // The MIR graph this LIR was lowered from.
  MIRGraph& mir_;

 public:
  explicit LIRGraph(MIRGraph* mir);

  // Set up the constant pool map and reserve one LBlock slot per MIR block.
  MOZ_MUST_USE bool init() {
    return constantPoolMap_.init() &&
           blocks_.init(mir_.alloc(), mir_.numBlocks());
  }
  MIRGraph& mir() const { return mir_; }
  size_t numBlocks() const { return blocks_.length(); }
  LBlock* getBlock(size_t i) { return &blocks_[i]; }
  uint32_t numBlockIds() const { return mir_.numBlockIds(); }
  // Placement-construct the LBlock for |mir| in its preallocated slot
  // (indexed by MIR block id), then initialize it.
  MOZ_MUST_USE bool initBlock(MBasicBlock* mir) {
    auto* block = &blocks_[mir->id()];
    auto* lir = new (block) LBlock(mir);
    return lir->init(mir_.alloc());
  }
  // Allocate a fresh virtual register id (ids start at 1, see below).
  uint32_t getVirtualRegister() {
    numVirtualRegisters_ += VREG_INCREMENT;
    return numVirtualRegisters_;
  }
  uint32_t numVirtualRegisters() const {
    // Virtual registers are 1-based, not 0-based, so add one as a
    // convenience for 0-based arrays.
    return numVirtualRegisters_ + 1;
  }
  // Hand out sequential instruction ids.
  uint32_t getInstructionId() { return numInstructions_++; }
  uint32_t numInstructions() const { return numInstructions_; }
  void setLocalSlotCount(uint32_t localSlotCount) {
    localSlotCount_ = localSlotCount;
  }
  uint32_t localSlotCount() const { return localSlotCount_; }
  // Return the localSlotCount() value rounded up so that it satisfies the
  // platform stack alignment requirement, and so that it's a multiple of
  // the number of slots per Value.
  uint32_t paddedLocalSlotCount() const {
    // Round to JitStackAlignment, and implicitly to sizeof(Value) as
    // JitStackAlignment is a multiple of sizeof(Value). These alignments
    // are needed for spilling SIMD registers properly, and for
    // StackOffsetOfPassedArg which rounds argument slots to 8-byte
    // boundaries.
    return AlignBytes(localSlotCount(), JitStackAlignment);
  }
  size_t paddedLocalSlotsSize() const { return paddedLocalSlotCount(); }
  void setArgumentSlotCount(uint32_t argumentSlotCount) {
    argumentSlotCount_ = argumentSlotCount;
  }
  uint32_t argumentSlotCount() const { return argumentSlotCount_; }
  size_t argumentsSize() const { return argumentSlotCount() * sizeof(Value); }
  uint32_t totalSlotCount() const {
    return paddedLocalSlotCount() + argumentsSize();
  }
  // Intern |v| in the constant pool, returning its index in |*index|;
  // defined out of line.
  MOZ_MUST_USE bool addConstantToPool(const Value& v, uint32_t* index);
  size_t numConstants() const { return constantPool_.length(); }
  Value* constantPool() { return &constantPool_[0]; }
  // The entry snapshot may be set exactly once.
  void setEntrySnapshot(LSnapshot* snapshot) {
    MOZ_ASSERT(!entrySnapshot_);
    entrySnapshot_ = snapshot;
  }
  LSnapshot* entrySnapshot() const {
    MOZ_ASSERT(entrySnapshot_);
    return entrySnapshot_;
  }
  // Record that |ins| requires a safepoint; defined out of line.
  bool noteNeedsSafepoint(LInstruction* ins);
  size_t numNonCallSafepoints() const { return nonCallSafepoints_.length(); }
  LInstruction* getNonCallSafepoint(size_t i) const {
    return nonCallSafepoints_[i];
  }
  size_t numSafepoints() const { return safepoints_.length(); }
  LInstruction* getSafepoint(size_t i) const { return safepoints_[i]; }

  void dump(GenericPrinter& out);
  void dump();
};
1699 
LAllocation(AnyRegister reg)1700 LAllocation::LAllocation(AnyRegister reg) {
1701   if (reg.isFloat())
1702     *this = LFloatReg(reg.fpu());
1703   else
1704     *this = LGeneralReg(reg.gpr());
1705 }
1706 
toRegister()1707 AnyRegister LAllocation::toRegister() const {
1708   MOZ_ASSERT(isRegister());
1709   if (isFloatReg()) return AnyRegister(toFloatReg()->reg());
1710   return AnyRegister(toGeneralReg()->reg());
1711 }
1712 
1713 }  // namespace jit
1714 }  // namespace js
1715 
1716 #include "jit/shared/LIR-shared.h"
1717 #if defined(JS_CODEGEN_X86) || defined(JS_CODEGEN_X64)
1718 #if defined(JS_CODEGEN_X86)
1719 #include "jit/x86/LIR-x86.h"
1720 #elif defined(JS_CODEGEN_X64)
1721 #include "jit/x64/LIR-x64.h"
1722 #endif
1723 #include "jit/x86-shared/LIR-x86-shared.h"
1724 #elif defined(JS_CODEGEN_ARM)
1725 #include "jit/arm/LIR-arm.h"
1726 #elif defined(JS_CODEGEN_ARM64)
1727 #include "jit/arm64/LIR-arm64.h"
1728 #elif defined(JS_CODEGEN_MIPS32) || defined(JS_CODEGEN_MIPS64)
1729 #if defined(JS_CODEGEN_MIPS32)
1730 #include "jit/mips32/LIR-mips32.h"
1731 #elif defined(JS_CODEGEN_MIPS64)
1732 #include "jit/mips64/LIR-mips64.h"
1733 #endif
1734 #include "jit/mips-shared/LIR-mips-shared.h"
1735 #elif defined(JS_CODEGEN_NONE)
1736 #include "jit/none/LIR-none.h"
1737 #else
1738 #error "Unknown architecture!"
1739 #endif
1740 
1741 #undef LIR_HEADER
1742 
1743 namespace js {
1744 namespace jit {
1745 
// Define the checked downcasts LNode::to<Opcode>() (mutable and const
// variants) for every opcode in LIR_OPCODE_LIST.
#define LIROP(name)                           \
  L##name* LNode::to##name() {                \
    MOZ_ASSERT(is##name());                   \
    return static_cast<L##name*>(this);       \
  }                                           \
  const L##name* LNode::to##name() const {    \
    MOZ_ASSERT(is##name());                   \
    return static_cast<const L##name*>(this); \
  }
LIR_OPCODE_LIST(LIROP)
#undef LIROP
1757 
1758 #define LALLOC_CAST(type)               \
1759   L##type* LAllocation::to##type() {    \
1760     MOZ_ASSERT(is##type());             \
1761     return static_cast<L##type*>(this); \
1762   }
1763 #define LALLOC_CONST_CAST(type)                  \
1764   const L##type* LAllocation::to##type() const { \
1765     MOZ_ASSERT(is##type());                      \
1766     return static_cast<const L##type*>(this);    \
1767   }
1768 
LALLOC_CAST(Use)1769 LALLOC_CAST(Use)
1770 LALLOC_CONST_CAST(Use)
1771 LALLOC_CONST_CAST(GeneralReg)
1772 LALLOC_CONST_CAST(FloatReg)
1773 LALLOC_CONST_CAST(StackSlot)
1774 LALLOC_CONST_CAST(Argument)
1775 LALLOC_CONST_CAST(ConstantIndex)
1776 
1777 #undef LALLOC_CAST
1778 
1779 #ifdef JS_NUNBOX32
1780 static inline signed OffsetToOtherHalfOfNunbox(LDefinition::Type type) {
1781   MOZ_ASSERT(type == LDefinition::TYPE || type == LDefinition::PAYLOAD);
1782   signed offset = (type == LDefinition::TYPE) ? PAYLOAD_INDEX - TYPE_INDEX
1783                                               : TYPE_INDEX - PAYLOAD_INDEX;
1784   return offset;
1785 }
1786 
// Assert that |type1| and |type2| are the two distinct halves of a nunbox
// (one TYPE and one PAYLOAD, in either order).
static inline void AssertTypesFormANunbox(LDefinition::Type type1,
                                          LDefinition::Type type2) {
  MOZ_ASSERT((type1 == LDefinition::TYPE && type2 == LDefinition::PAYLOAD) ||
             (type2 == LDefinition::TYPE && type1 == LDefinition::PAYLOAD));
}
1792 
OffsetOfNunboxSlot(LDefinition::Type type)1793 static inline unsigned OffsetOfNunboxSlot(LDefinition::Type type) {
1794   if (type == LDefinition::PAYLOAD) return NUNBOX32_PAYLOAD_OFFSET;
1795   return NUNBOX32_TYPE_OFFSET;
1796 }
1797 
1798 // Note that stack indexes for LStackSlot are modelled backwards, so a
1799 // double-sized slot starting at 2 has its next word at 1, *not* 3.
BaseOfNunboxSlot(LDefinition::Type type,unsigned slot)1800 static inline unsigned BaseOfNunboxSlot(LDefinition::Type type, unsigned slot) {
1801   if (type == LDefinition::PAYLOAD) return slot + NUNBOX32_PAYLOAD_OFFSET;
1802   return slot + NUNBOX32_TYPE_OFFSET;
1803 }
1804 #endif
1805 
1806 }  // namespace jit
1807 }  // namespace js
1808 
1809 #endif /* jit_LIR_h */
1810