/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
 * vim: set ts=8 sts=2 et sw=2 tw=80:
 * This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this
 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */

#ifndef jit_LIR_h
#define jit_LIR_h

// This file declares the core data structures for LIR: storage allocations for
// inputs and outputs, as well as the interface instructions must conform to.

#include "mozilla/Array.h"
#include "mozilla/Casting.h"

#include <algorithm>  // for std::max, used below

#include "jit/Bailouts.h"
#include "jit/FixedList.h"
#include "jit/InlineList.h"
#include "jit/JitAllocPolicy.h"
#include "jit/LOpcodesGenerated.h"
#include "jit/MIR.h"
#include "jit/MIRGraph.h"
#include "jit/Registers.h"
#include "jit/Safepoints.h"
#include "util/Memory.h"

namespace js {
namespace jit {

class LUse;
class LGeneralReg;
class LFloatReg;
class LStackSlot;
class LStackArea;
class LArgument;
class LConstantIndex;
class LInstruction;
class LDefinition;
class MBasicBlock;
class MIRGenerator;

static const uint32_t VREG_INCREMENT = 1;

static const uint32_t THIS_FRAME_ARGSLOT = 0;

#if defined(JS_NUNBOX32)
# define BOX_PIECES 2
static const uint32_t VREG_TYPE_OFFSET = 0;
static const uint32_t VREG_DATA_OFFSET = 1;
static const uint32_t TYPE_INDEX = 0;
static const uint32_t PAYLOAD_INDEX = 1;
static const uint32_t INT64LOW_INDEX = 0;
static const uint32_t INT64HIGH_INDEX = 1;
#elif defined(JS_PUNBOX64)
# define BOX_PIECES 1
#else
# error "Unknown!"
#endif

static const uint32_t INT64_PIECES = sizeof(int64_t) / sizeof(uintptr_t);

// Represents storage for an operand. For constants, the pointer is tagged
// in its low bits, and the untagged pointer is a pointer to an MConstant.
class LAllocation : public TempObject {
  uintptr_t bits_;

  // 3 bits gives us enough for an interesting set of Kinds and also fits
  // within the alignment bits of pointers to Value, which are always
  // 8-byte aligned.
  static const uintptr_t KIND_BITS = 3;
  static const uintptr_t KIND_SHIFT = 0;
  static const uintptr_t KIND_MASK = (1 << KIND_BITS) - 1;

 protected:
  static const uintptr_t DATA_BITS = (sizeof(uint32_t) * 8) - KIND_BITS;
  static const uintptr_t DATA_SHIFT = KIND_SHIFT + KIND_BITS;

 public:
  enum Kind {
    CONSTANT_VALUE,  // MConstant*.
    CONSTANT_INDEX,  // Constant arbitrary index.
    USE,  // Use of a virtual register, with physical allocation policy.
    GPR,  // General purpose register.
    FPU,  // Floating-point register.
    STACK_SLOT,  // Stack slot.
    STACK_AREA,  // Stack area.
    ARGUMENT_SLOT  // Argument slot.
  };

  static const uintptr_t DATA_MASK = (1 << DATA_BITS) - 1;

 protected:
  uint32_t data() const {
    MOZ_ASSERT(!hasIns());
    return mozilla::AssertedCast<uint32_t>(bits_ >> DATA_SHIFT);
  }
  void setData(uintptr_t data) {
    MOZ_ASSERT(!hasIns());
    MOZ_ASSERT(data <= DATA_MASK);
    bits_ &= ~(DATA_MASK << DATA_SHIFT);
    bits_ |= (data << DATA_SHIFT);
  }
  void setKindAndData(Kind kind, uintptr_t data) {
    MOZ_ASSERT(data <= DATA_MASK);
    bits_ = (uintptr_t(kind) << KIND_SHIFT) | data << DATA_SHIFT;
    MOZ_ASSERT(!hasIns());
  }

  bool hasIns() const { return isStackArea(); }
  const LInstruction* ins() const {
    MOZ_ASSERT(hasIns());
    return reinterpret_cast<const LInstruction*>(bits_ &
                                                 ~(KIND_MASK << KIND_SHIFT));
  }
  LInstruction* ins() {
    MOZ_ASSERT(hasIns());
    return reinterpret_cast<LInstruction*>(bits_ & ~(KIND_MASK << KIND_SHIFT));
  }
  void setKindAndIns(Kind kind, LInstruction* ins) {
    uintptr_t data = reinterpret_cast<uintptr_t>(ins);
    MOZ_ASSERT((data & (KIND_MASK << KIND_SHIFT)) == 0);
    bits_ = data | (uintptr_t(kind) << KIND_SHIFT);
    MOZ_ASSERT(hasIns());
  }

  LAllocation(Kind kind, uintptr_t data) { setKindAndData(kind, data); }
  LAllocation(Kind kind, LInstruction* ins) { setKindAndIns(kind, ins); }
  explicit LAllocation(Kind kind) { setKindAndData(kind, 0); }

 public:
  LAllocation() : bits_(0) { MOZ_ASSERT(isBogus()); }

  // The MConstant pointer must have its low bits cleared.
  explicit LAllocation(const MConstant* c) {
    MOZ_ASSERT(c);
    bits_ = uintptr_t(c);
    MOZ_ASSERT((bits_ & (KIND_MASK << KIND_SHIFT)) == 0);
    bits_ |= CONSTANT_VALUE << KIND_SHIFT;
  }
  inline explicit LAllocation(AnyRegister reg);

  Kind kind() const { return (Kind)((bits_ >> KIND_SHIFT) & KIND_MASK); }

  bool isBogus() const { return bits_ == 0; }
  bool isUse() const { return kind() == USE; }
  bool isConstant() const { return isConstantValue() || isConstantIndex(); }
  bool isConstantValue() const { return kind() == CONSTANT_VALUE; }
  bool isConstantIndex() const { return kind() == CONSTANT_INDEX; }
  bool isGeneralReg() const { return kind() == GPR; }
  bool isFloatReg() const { return kind() == FPU; }
  bool isStackSlot() const { return kind() == STACK_SLOT; }
  bool isStackArea() const { return kind() == STACK_AREA; }
  bool isArgument() const { return kind() == ARGUMENT_SLOT; }
  bool isRegister() const { return isGeneralReg() || isFloatReg(); }
  bool isRegister(bool needFloat) const {
    return needFloat ? isFloatReg() : isGeneralReg();
  }
  bool isMemory() const { return isStackSlot() || isArgument(); }
  inline uint32_t memorySlot() const;
  inline LUse* toUse();
  inline const LUse* toUse() const;
  inline const LGeneralReg* toGeneralReg() const;
  inline const LFloatReg* toFloatReg() const;
  inline const LStackSlot* toStackSlot() const;
  inline LStackArea* toStackArea();
  inline const LStackArea* toStackArea() const;
  inline const LArgument* toArgument() const;
  inline const LConstantIndex* toConstantIndex() const;
  inline AnyRegister toRegister() const;

  const MConstant* toConstant() const {
    MOZ_ASSERT(isConstantValue());
    return reinterpret_cast<const MConstant*>(bits_ &
                                              ~(KIND_MASK << KIND_SHIFT));
  }

  bool operator==(const LAllocation& other) const {
    return bits_ == other.bits_;
  }

  bool operator!=(const LAllocation& other) const {
    return bits_ != other.bits_;
  }

  HashNumber hash() const { return bits_; }

  bool aliases(const LAllocation& other) const;

#ifdef JS_JITSPEW
  UniqueChars toString() const;
  void dump() const;
#endif
};
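
// Worked example (illustrative sketch, not part of the engine): with
// KIND_BITS == 3 and DATA_SHIFT == 3, a non-pointer allocation packs its
// payload above a 3-bit kind tag. A stack slot at byte offset 16 encodes as
//
//   bits_ = (16 << DATA_SHIFT) | STACK_SLOT;  // 0b10000'101 == 0x85
//
// and decodes with kind() == bits_ & KIND_MASK and
// data() == bits_ >> DATA_SHIFT. CONSTANT_VALUE allocations instead store an
// 8-byte-aligned MConstant* directly; its three zero low bits coincide with
// the CONSTANT_VALUE tag, which is 0.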

class LUse : public LAllocation {
  static const uint32_t POLICY_BITS = 3;
  static const uint32_t POLICY_SHIFT = 0;
  static const uint32_t POLICY_MASK = (1 << POLICY_BITS) - 1;
#ifdef JS_CODEGEN_ARM64
  static const uint32_t REG_BITS = 7;
#else
  static const uint32_t REG_BITS = 6;
#endif
  static const uint32_t REG_SHIFT = POLICY_SHIFT + POLICY_BITS;
  static const uint32_t REG_MASK = (1 << REG_BITS) - 1;

  // Whether the physical register for this operand may be reused for a def.
  static const uint32_t USED_AT_START_BITS = 1;
  static const uint32_t USED_AT_START_SHIFT = REG_SHIFT + REG_BITS;
  static const uint32_t USED_AT_START_MASK = (1 << USED_AT_START_BITS) - 1;

  // The REG field will hold the register code for any Register or
  // FloatRegister, though not for an AnyRegister.
  static_assert(std::max(Registers::Total, FloatRegisters::Total) <=
                    REG_MASK + 1,
                "The field must be able to represent any register code");

 public:
  // Virtual registers get the remaining bits.
  static const uint32_t VREG_BITS =
      DATA_BITS - (USED_AT_START_SHIFT + USED_AT_START_BITS);
  static const uint32_t VREG_SHIFT = USED_AT_START_SHIFT + USED_AT_START_BITS;
  static const uint32_t VREG_MASK = (1 << VREG_BITS) - 1;

  enum Policy {
    // Input should be in a read-only register or stack slot.
    ANY,

    // Input must be in a read-only register.
    REGISTER,

    // Input must be in a specific, read-only register.
    FIXED,

    // Keep the used virtual register alive, and use whatever allocation is
    // available. This is similar to ANY but hints to the register allocator
    // that it is never useful to optimize this site.
    KEEPALIVE,

    // Input must be allocated on the stack. Only used when extracting stack
    // results from stack result areas.
    STACK,

    // For snapshot inputs, indicates that the associated instruction will
    // write this input to its output register before bailing out.
    // The register allocator may thus allocate that output register, and
    // does not need to keep the virtual register alive (alternatively,
    // this may be treated as KEEPALIVE).
    RECOVERED_INPUT
  };

  void set(Policy policy, uint32_t reg, bool usedAtStart) {
    MOZ_ASSERT(reg <= REG_MASK, "Register code must fit in field");
    setKindAndData(USE, (policy << POLICY_SHIFT) | (reg << REG_SHIFT) |
                            ((usedAtStart ? 1 : 0) << USED_AT_START_SHIFT));
  }

 public:
  LUse(uint32_t vreg, Policy policy, bool usedAtStart = false) {
    set(policy, 0, usedAtStart);
    setVirtualRegister(vreg);
  }
  explicit LUse(Policy policy, bool usedAtStart = false) {
    set(policy, 0, usedAtStart);
  }
  explicit LUse(Register reg, bool usedAtStart = false) {
    set(FIXED, reg.code(), usedAtStart);
  }
  explicit LUse(FloatRegister reg, bool usedAtStart = false) {
    set(FIXED, reg.code(), usedAtStart);
  }
  LUse(Register reg, uint32_t virtualRegister, bool usedAtStart = false) {
    set(FIXED, reg.code(), usedAtStart);
    setVirtualRegister(virtualRegister);
  }
  LUse(FloatRegister reg, uint32_t virtualRegister, bool usedAtStart = false) {
    set(FIXED, reg.code(), usedAtStart);
    setVirtualRegister(virtualRegister);
  }

  void setVirtualRegister(uint32_t index) {
    MOZ_ASSERT(index < VREG_MASK);

    uint32_t old = data() & ~(VREG_MASK << VREG_SHIFT);
    setData(old | (index << VREG_SHIFT));
  }

  Policy policy() const {
    Policy policy = (Policy)((data() >> POLICY_SHIFT) & POLICY_MASK);
    return policy;
  }
  uint32_t virtualRegister() const {
    uint32_t index = (data() >> VREG_SHIFT) & VREG_MASK;
    MOZ_ASSERT(index != 0);
    return index;
  }
  uint32_t registerCode() const {
    MOZ_ASSERT(policy() == FIXED);
    return (data() >> REG_SHIFT) & REG_MASK;
  }
  bool isFixedRegister() const { return policy() == FIXED; }
  bool usedAtStart() const {
    return !!((data() >> USED_AT_START_SHIFT) & USED_AT_START_MASK);
  }
};
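
// Layout sketch (illustrative, assuming a non-ARM64 build where
// REG_BITS == 6): LUse subdivides LAllocation's 29 data bits as
//
//   [ vreg : 19 ][ usedAtStart : 1 ][ reg : 6 ][ policy : 3 ]
//
// so LUse(/* vreg = */ 7, LUse::REGISTER, /* usedAtStart = */ true) stores
// data() == (7 << VREG_SHIFT) | (1 << USED_AT_START_SHIFT) | REGISTER.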

static const uint32_t MAX_VIRTUAL_REGISTERS = LUse::VREG_MASK;

class LBoxAllocation {
#ifdef JS_NUNBOX32
  LAllocation type_;
  LAllocation payload_;
#else
  LAllocation value_;
#endif

 public:
#ifdef JS_NUNBOX32
  LBoxAllocation(LAllocation type, LAllocation payload)
      : type_(type), payload_(payload) {}

  LAllocation type() const { return type_; }
  LAllocation payload() const { return payload_; }
#else
  explicit LBoxAllocation(LAllocation value) : value_(value) {}

  LAllocation value() const { return value_; }
#endif
};

template <class ValT>
class LInt64Value {
#if JS_BITS_PER_WORD == 32
  ValT high_;
  ValT low_;
#else
  ValT value_;
#endif

 public:
  LInt64Value() = default;

#if JS_BITS_PER_WORD == 32
  LInt64Value(ValT high, ValT low) : high_(high), low_(low) {}

  ValT high() const { return high_; }
  ValT low() const { return low_; }

  const ValT* pointerHigh() const { return &high_; }
  const ValT* pointerLow() const { return &low_; }
#else
  explicit LInt64Value(ValT value) : value_(value) {}

  ValT value() const { return value_; }
  const ValT* pointer() const { return &value_; }
#endif
};
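
// Usage sketch (illustrative; the register names are hypothetical): an int64
// value occupies one piece on 64-bit targets and a high/low pair on 32-bit
// targets, matching INT64_PIECES above.
//
//   LInt64Allocation a(LGeneralReg(reg));                  // 64-bit build
//   LInt64Allocation a(LGeneralReg(hi), LGeneralReg(lo));  // 32-bit build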

using LInt64Allocation = LInt64Value<LAllocation>;

class LGeneralReg : public LAllocation {
 public:
  explicit LGeneralReg(Register reg) : LAllocation(GPR, reg.code()) {}

  Register reg() const { return Register::FromCode(data()); }
};

class LFloatReg : public LAllocation {
 public:
  explicit LFloatReg(FloatRegister reg) : LAllocation(FPU, reg.code()) {}

  FloatRegister reg() const { return FloatRegister::FromCode(data()); }
};

// Arbitrary constant index.
class LConstantIndex : public LAllocation {
  explicit LConstantIndex(uint32_t index)
      : LAllocation(CONSTANT_INDEX, index) {}

 public:
  static LConstantIndex FromIndex(uint32_t index) {
    return LConstantIndex(index);
  }

  uint32_t index() const { return data(); }
};

// Stack slots are indices into the stack. The indices are byte indices.
class LStackSlot : public LAllocation {
 public:
  explicit LStackSlot(uint32_t slot) : LAllocation(STACK_SLOT, slot) {}

  uint32_t slot() const { return data(); }
};

// Stack area indicates a contiguous stack allocation meant to receive call
// results that don't fit in registers.
class LStackArea : public LAllocation {
 public:
  explicit LStackArea(LInstruction* stackArea)
      : LAllocation(STACK_AREA, stackArea) {}

  // Byte index of base of stack area, in the same coordinate space as
  // LStackSlot::slot().
  inline uint32_t base() const;
  inline void setBase(uint32_t base);

  // Size in bytes of the stack area.
  inline uint32_t size() const;
  inline uint32_t alignment() const { return 8; }

  class ResultIterator {
    const LStackArea& alloc_;
    uint32_t idx_;

   public:
    explicit ResultIterator(const LStackArea& alloc)
        : alloc_(alloc), idx_(0) {}

    inline bool done() const;
    inline void next();
    inline LAllocation alloc() const;
    inline bool isGcPointer() const;

    explicit operator bool() const { return !done(); }
  };

  ResultIterator results() const { return ResultIterator(*this); }

  inline LStackSlot resultAlloc(LInstruction* lir, LDefinition* def) const;
};

// Arguments are reverse indices into the stack. The indices are byte indices.
class LArgument : public LAllocation {
 public:
  explicit LArgument(uint32_t index) : LAllocation(ARGUMENT_SLOT, index) {}

  uint32_t index() const { return data(); }
};

inline uint32_t LAllocation::memorySlot() const {
  MOZ_ASSERT(isMemory());
  return isStackSlot() ? toStackSlot()->slot() : toArgument()->index();
}

// Represents storage for a definition.
class LDefinition {
  // Bits containing policy, type, and virtual register.
  uint32_t bits_;

  // Before register allocation, this optionally contains a fixed policy.
  // Register allocation assigns this field to a physical policy if none is
  // fixed.
  //
  // Right now, pre-allocated outputs are limited to the following:
  //   * Physical argument stack slots.
  //   * Physical registers.
  LAllocation output_;

  static const uint32_t TYPE_BITS = 4;
  static const uint32_t TYPE_SHIFT = 0;
  static const uint32_t TYPE_MASK = (1 << TYPE_BITS) - 1;
  static const uint32_t POLICY_BITS = 2;
  static const uint32_t POLICY_SHIFT = TYPE_SHIFT + TYPE_BITS;
  static const uint32_t POLICY_MASK = (1 << POLICY_BITS) - 1;

  static const uint32_t VREG_BITS =
      (sizeof(uint32_t) * 8) - (POLICY_BITS + TYPE_BITS);
  static const uint32_t VREG_SHIFT = POLICY_SHIFT + POLICY_BITS;
  static const uint32_t VREG_MASK = (1 << VREG_BITS) - 1;

 public:
  // Note that definitions, by default, are always allocated a register,
  // unless the policy specifies that an input can be re-used and that input
  // is a stack slot.
  enum Policy {
    // The policy is predetermined by the LAllocation attached to this
    // definition. The allocation may be:
    //   * A register, which may not appear as any fixed temporary.
    //   * A stack slot or argument.
    //
    // Register allocation will not modify a fixed allocation.
    FIXED,

    // A random register of an appropriate class will be assigned.
    REGISTER,

    // An area on the stack must be assigned. Used when defining stack results
    // and stack result areas.
    STACK,

    // One definition per instruction must re-use the first input
    // allocation, which (for now) must be a register.
    MUST_REUSE_INPUT
  };

  enum Type {
    GENERAL,  // Generic, integer or pointer-width data (GPR).
    INT32,    // int32 data (GPR).
    OBJECT,   // Pointer that may be collected as garbage (GPR).
    SLOTS,    // Slots/elements pointer that may be moved by minor GCs (GPR).
    FLOAT32,  // 32-bit floating-point value (FPU).
    DOUBLE,   // 64-bit floating-point value (FPU).
    SIMD128,  // 128-bit SIMD vector (FPU).
    STACKRESULTS,  // A variable-size stack allocation that may contain objects.
#ifdef JS_NUNBOX32
    // A type virtual register must be followed by a payload virtual
    // register, as both will be tracked as a single gcthing.
    TYPE,
    PAYLOAD
#else
    BOX  // Joined box, for punbox systems. (GPR, gcthing)
#endif
  };

  void set(uint32_t index, Type type, Policy policy) {
    static_assert(MAX_VIRTUAL_REGISTERS <= VREG_MASK);
    bits_ =
        (index << VREG_SHIFT) | (policy << POLICY_SHIFT) | (type << TYPE_SHIFT);
#ifndef ENABLE_WASM_SIMD
    MOZ_ASSERT(this->type() != SIMD128);
#endif
  }

 public:
  LDefinition(uint32_t index, Type type, Policy policy = REGISTER) {
    set(index, type, policy);
  }

  explicit LDefinition(Type type, Policy policy = REGISTER) {
    set(0, type, policy);
  }

  LDefinition(Type type, const LAllocation& a) : output_(a) {
    set(0, type, FIXED);
  }

  LDefinition(uint32_t index, Type type, const LAllocation& a) : output_(a) {
    set(index, type, FIXED);
  }

  LDefinition() : bits_(0) { MOZ_ASSERT(isBogusTemp()); }

  static LDefinition BogusTemp() { return LDefinition(); }

  Policy policy() const {
    return (Policy)((bits_ >> POLICY_SHIFT) & POLICY_MASK);
  }
  Type type() const { return (Type)((bits_ >> TYPE_SHIFT) & TYPE_MASK); }
  bool isCompatibleReg(const AnyRegister& r) const {
    if (isFloatReg() && r.isFloat()) {
      if (type() == FLOAT32) {
        return r.fpu().isSingle();
      }
      if (type() == DOUBLE) {
        return r.fpu().isDouble();
      }
      if (type() == SIMD128) {
        return r.fpu().isSimd128();
      }
      MOZ_CRASH("Unexpected MDefinition type");
    }
    return !isFloatReg() && !r.isFloat();
  }
  bool isCompatibleDef(const LDefinition& other) const {
#if defined(JS_CODEGEN_ARM) || defined(JS_CODEGEN_MIPS32)
    if (isFloatReg() && other.isFloatReg()) {
      return type() == other.type();
    }
    return !isFloatReg() && !other.isFloatReg();
#else
    return isFloatReg() == other.isFloatReg();
#endif
  }

  bool isFloatReg() const {
    return type() == FLOAT32 || type() == DOUBLE || type() == SIMD128;
  }
  uint32_t virtualRegister() const {
    uint32_t index = (bits_ >> VREG_SHIFT) & VREG_MASK;
    // MOZ_ASSERT(index != 0);
    return index;
  }
  LAllocation* output() { return &output_; }
  const LAllocation* output() const { return &output_; }
  bool isFixed() const { return policy() == FIXED; }
  bool isBogusTemp() const { return isFixed() && output()->isBogus(); }
  void setVirtualRegister(uint32_t index) {
    MOZ_ASSERT(index < VREG_MASK);
    bits_ &= ~(VREG_MASK << VREG_SHIFT);
    bits_ |= index << VREG_SHIFT;
  }
  void setOutput(const LAllocation& a) {
    output_ = a;
    if (!a.isUse()) {
      bits_ &= ~(POLICY_MASK << POLICY_SHIFT);
      bits_ |= FIXED << POLICY_SHIFT;
    }
  }
  void setReusedInput(uint32_t operand) {
    output_ = LConstantIndex::FromIndex(operand);
  }
  uint32_t getReusedInput() const {
    MOZ_ASSERT(policy() == LDefinition::MUST_REUSE_INPUT);
    return output_.toConstantIndex()->index();
  }

  static inline Type TypeFrom(MIRType type) {
    switch (type) {
      case MIRType::Boolean:
      case MIRType::Int32:
        // The stack slot allocator doesn't currently support allocating
        // 1-byte slots, so for now we lower MIRType::Boolean into INT32.
        static_assert(sizeof(bool) <= sizeof(int32_t),
                      "bool doesn't fit in an int32 slot");
        return LDefinition::INT32;
      case MIRType::String:
      case MIRType::Symbol:
      case MIRType::BigInt:
      case MIRType::Object:
      case MIRType::RefOrNull:
        return LDefinition::OBJECT;
      case MIRType::Double:
        return LDefinition::DOUBLE;
      case MIRType::Float32:
        return LDefinition::FLOAT32;
#if defined(JS_PUNBOX64)
      case MIRType::Value:
        return LDefinition::BOX;
#endif
      case MIRType::Slots:
      case MIRType::Elements:
        return LDefinition::SLOTS;
      case MIRType::Pointer:
      case MIRType::IntPtr:
        return LDefinition::GENERAL;
#if defined(JS_PUNBOX64)
      case MIRType::Int64:
        return LDefinition::GENERAL;
#endif
      case MIRType::StackResults:
        return LDefinition::STACKRESULTS;
      case MIRType::Simd128:
        return LDefinition::SIMD128;
      default:
        MOZ_CRASH("unexpected type");
    }
  }

  UniqueChars toString() const;

#ifdef JS_JITSPEW
  void dump() const;
#endif
};
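
// Usage sketch (illustrative; the vreg numbers are hypothetical, and
// ReturnReg stands for whatever platform register a lowering might pin): a
// REGISTER-policy definition lets the allocator pick any suitable register,
// while a FIXED definition pins the output up front.
//
//   LDefinition picked(/* index = */ 5, LDefinition::INT32);
//   LDefinition pinned(/* index = */ 6, LDefinition::GENERAL,
//                      LGeneralReg(ReturnReg));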

class LInt64Definition : public LInt64Value<LDefinition> {
 public:
  using LInt64Value<LDefinition>::LInt64Value;

  static LInt64Definition BogusTemp() { return LInt64Definition(); }

  bool isBogusTemp() const {
#if JS_BITS_PER_WORD == 32
    MOZ_ASSERT(high().isBogusTemp() == low().isBogusTemp());
    return high().isBogusTemp();
#else
    return value().isBogusTemp();
#endif
  }
};

// Forward declarations of LIR types.
#define LIROP(op) class L##op;
LIR_OPCODE_LIST(LIROP)
#undef LIROP

class LSnapshot;
class LSafepoint;
class LElementVisitor;

constexpr size_t MaxNumLInstructionOperands = 63;

// The common base class for LPhi and LInstruction.
class LNode {
 protected:
  MDefinition* mir_;

 private:
  LBlock* block_;
  uint32_t id_;

 protected:
  // Bitfields below are all uint32_t to make sure MSVC packs them correctly.
  uint32_t op_ : 10;
  uint32_t isCall_ : 1;

  // LPhi::numOperands() may not fit in this bitfield, so we only use this
  // field for LInstruction.
  uint32_t nonPhiNumOperands_ : 6;
  static_assert((1 << 6) - 1 == MaxNumLInstructionOperands,
                "packing constraints");

  // For LInstruction, the first operand is stored at offset
  // sizeof(LInstruction) + nonPhiOperandsOffset_ * sizeof(uintptr_t).
  uint32_t nonPhiOperandsOffset_ : 5;
  uint32_t numDefs_ : 4;
  uint32_t numTemps_ : 4;

 public:
  enum class Opcode {
#define LIROP(name) name,
    LIR_OPCODE_LIST(LIROP)
#undef LIROP
    Invalid
  };

  LNode(Opcode op, uint32_t nonPhiNumOperands, uint32_t numDefs,
        uint32_t numTemps)
      : mir_(nullptr),
        block_(nullptr),
        id_(0),
        op_(uint32_t(op)),
        isCall_(false),
        nonPhiNumOperands_(nonPhiNumOperands),
        nonPhiOperandsOffset_(0),
        numDefs_(numDefs),
        numTemps_(numTemps) {
    MOZ_ASSERT(op < Opcode::Invalid);
    MOZ_ASSERT(op_ == uint32_t(op), "opcode must fit in bitfield");
    MOZ_ASSERT(nonPhiNumOperands_ == nonPhiNumOperands,
               "nonPhiNumOperands must fit in bitfield");
    MOZ_ASSERT(numDefs_ == numDefs, "numDefs must fit in bitfield");
    MOZ_ASSERT(numTemps_ == numTemps, "numTemps must fit in bitfield");
  }

  const char* opName() {
    switch (op()) {
#define LIR_NAME_INS(name) \
  case Opcode::name:       \
    return #name;
      LIR_OPCODE_LIST(LIR_NAME_INS)
#undef LIR_NAME_INS
      default:
        MOZ_CRASH("Invalid op");
    }
  }

  // Hook for opcodes to add extra high level detail about what code will be
  // emitted for the op.
 private:
  const char* extraName() const { return nullptr; }

 public:
#ifdef JS_JITSPEW
  const char* getExtraName() const;
#endif

  Opcode op() const { return Opcode(op_); }

  bool isInstruction() const { return op() != Opcode::Phi; }
  inline LInstruction* toInstruction();
  inline const LInstruction* toInstruction() const;

  // Returns the number of outputs of this instruction. If an output is
  // unallocated, it is an LDefinition, defining a virtual register.
  size_t numDefs() const { return numDefs_; }

  bool isCall() const { return isCall_; }

  // Does this call preserve the given register?
  // By default, it is assumed that all registers are clobbered by a call.
  inline bool isCallPreserved(AnyRegister reg) const;

  uint32_t id() const { return id_; }
  void setId(uint32_t id) {
    MOZ_ASSERT(!id_);
    MOZ_ASSERT(id);
    id_ = id;
  }
  void setMir(MDefinition* mir) { mir_ = mir; }
  MDefinition* mirRaw() const {
    /* Untyped MIR for this op. Prefer mir() methods in subclasses. */
    return mir_;
  }
  LBlock* block() const { return block_; }
  void setBlock(LBlock* block) { block_ = block; }

  // For an instruction which has a MUST_REUSE_INPUT output, whether that
  // output register will be restored to its original value when bailing out.
  inline bool recoversInput() const;

#ifdef JS_JITSPEW
  void dump(GenericPrinter& out);
  void dump();
  static void printName(GenericPrinter& out, Opcode op);
  void printName(GenericPrinter& out);
  void printOperands(GenericPrinter& out);
#endif

 public:
  // Opcode testing and casts.
#define LIROP(name)                                      \
  bool is##name() const { return op() == Opcode::name; } \
  inline L##name* to##name();                            \
  inline const L##name* to##name() const;
  LIR_OPCODE_LIST(LIROP)
#undef LIROP

  // Note: GenerateOpcodeFiles.py generates LOpcodesGenerated.h based on this
  // macro.
#define LIR_HEADER(opcode) \
  static constexpr LNode::Opcode classOpcode = LNode::Opcode::opcode;
};
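
// Illustrative expansion (one opcode shown): for the Goto opcode, the LIROP
// block above generates
//
//   bool isGoto() const { return op() == Opcode::Goto; }
//   inline LGoto* toGoto();
//   inline const LGoto* toGoto() const;
//
// which is what lets code later in this header write begin()->isGoto()
// without a cast.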

class LInstruction : public LNode,
                     public TempObject,
                     public InlineListNode<LInstruction> {
  // This snapshot could be set after a ResumePoint. It is used to restart
  // from the resume point pc.
  LSnapshot* snapshot_;

  // Structure capturing the set of stack slots and registers which are known
  // to hold either gcthings or Values.
  LSafepoint* safepoint_;

  LMoveGroup* inputMoves_;
  LMoveGroup* fixReuseMoves_;
  LMoveGroup* movesAfter_;

 protected:
  LInstruction(Opcode opcode, uint32_t numOperands, uint32_t numDefs,
               uint32_t numTemps)
      : LNode(opcode, numOperands, numDefs, numTemps),
        snapshot_(nullptr),
        safepoint_(nullptr),
        inputMoves_(nullptr),
        fixReuseMoves_(nullptr),
        movesAfter_(nullptr) {}

  void setIsCall() { isCall_ = true; }

 public:
  inline LDefinition* getDef(size_t index);

  void setDef(size_t index, const LDefinition& def) { *getDef(index) = def; }

  LAllocation* getOperand(size_t index) const {
    MOZ_ASSERT(index < numOperands());
    MOZ_ASSERT(nonPhiOperandsOffset_ > 0);
    uintptr_t p = reinterpret_cast<uintptr_t>(this + 1) +
                  nonPhiOperandsOffset_ * sizeof(uintptr_t);
    return reinterpret_cast<LAllocation*>(p) + index;
  }
  void setOperand(size_t index, const LAllocation& a) {
    *getOperand(index) = a;
  }

  void initOperandsOffset(size_t offset) {
    MOZ_ASSERT(nonPhiOperandsOffset_ == 0);
    MOZ_ASSERT(offset >= sizeof(LInstruction));
    MOZ_ASSERT(((offset - sizeof(LInstruction)) % sizeof(uintptr_t)) == 0);
    offset = (offset - sizeof(LInstruction)) / sizeof(uintptr_t);
    nonPhiOperandsOffset_ = offset;
    MOZ_ASSERT(nonPhiOperandsOffset_ == offset, "offset must fit in bitfield");
  }

  // Returns information about temporary registers needed. Each temporary
  // register is an LDefinition with a fixed or virtual register and
  // either GENERAL, FLOAT32, or DOUBLE type.
  size_t numTemps() const { return numTemps_; }
  inline LDefinition* getTemp(size_t index);

  LSnapshot* snapshot() const { return snapshot_; }
  LSafepoint* safepoint() const { return safepoint_; }
  LMoveGroup* inputMoves() const { return inputMoves_; }
  void setInputMoves(LMoveGroup* moves) { inputMoves_ = moves; }
  LMoveGroup* fixReuseMoves() const { return fixReuseMoves_; }
  void setFixReuseMoves(LMoveGroup* moves) { fixReuseMoves_ = moves; }
  LMoveGroup* movesAfter() const { return movesAfter_; }
  void setMovesAfter(LMoveGroup* moves) { movesAfter_ = moves; }
  uint32_t numOperands() const { return nonPhiNumOperands_; }
  void assignSnapshot(LSnapshot* snapshot);
  void initSafepoint(TempAllocator& alloc);

  class InputIterator;
};
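
// Worked example (illustrative sketch): if a subclass places its operand
// array two words past its LInstruction base, initOperandsOffset() records
// nonPhiOperandsOffset_ == 2, and getOperand(i) reconstitutes the address as
//
//   reinterpret_cast<uintptr_t>(this + 1)  // skip sizeof(LInstruction)
//       + 2 * sizeof(uintptr_t)            // skip the recorded offset
//       + i * sizeof(LAllocation);         // index into the array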

LInstruction* LNode::toInstruction() {
  MOZ_ASSERT(isInstruction());
  return static_cast<LInstruction*>(this);
}

const LInstruction* LNode::toInstruction() const {
  MOZ_ASSERT(isInstruction());
  return static_cast<const LInstruction*>(this);
}

class LElementVisitor {
#ifdef TRACK_SNAPSHOTS
  LInstruction* ins_ = nullptr;
#endif

 protected:
#ifdef TRACK_SNAPSHOTS
  LInstruction* instruction() { return ins_; }

  void setElement(LInstruction* ins) { ins_ = ins; }
#else
  void setElement(LInstruction* ins) {}
#endif
};

using LInstructionIterator = InlineList<LInstruction>::iterator;
using LInstructionReverseIterator = InlineList<LInstruction>::reverse_iterator;

class MPhi;

// Phi is a pseudo-instruction that emits no code, and is an annotation for the
// register allocator. Like its equivalent in MIR, phis are collected at the
// top of blocks and are meant to be executed in parallel, choosing the input
// corresponding to the predecessor taken in the control flow graph.
class LPhi final : public LNode {
  LAllocation* const inputs_;
  LDefinition def_;

 public:
  LIR_HEADER(Phi)

  LPhi(MPhi* ins, LAllocation* inputs)
      : LNode(classOpcode,
              /* nonPhiNumOperands = */ 0,
              /* numDefs = */ 1,
              /* numTemps = */ 0),
        inputs_(inputs) {
    setMir(ins);
  }

  LDefinition* getDef(size_t index) {
    MOZ_ASSERT(index == 0);
    return &def_;
  }
  void setDef(size_t index, const LDefinition& def) {
    MOZ_ASSERT(index == 0);
    def_ = def;
  }
  size_t numOperands() const { return mir_->toPhi()->numOperands(); }
  LAllocation* getOperand(size_t index) {
    MOZ_ASSERT(index < numOperands());
    return &inputs_[index];
  }
  void setOperand(size_t index, const LAllocation& a) {
    MOZ_ASSERT(index < numOperands());
    inputs_[index] = a;
  }

  // Phis don't have temps, so calling numTemps/getTemp is pointless.
  size_t numTemps() const = delete;
  LDefinition* getTemp(size_t index) = delete;
};

class LMoveGroup;
class LBlock {
  MBasicBlock* block_;
  FixedList<LPhi> phis_;
  InlineList<LInstruction> instructions_;
  LMoveGroup* entryMoveGroup_;
  LMoveGroup* exitMoveGroup_;
  Label label_;

 public:
  explicit LBlock(MBasicBlock* block);
  [[nodiscard]] bool init(TempAllocator& alloc);

  void add(LInstruction* ins) {
    ins->setBlock(this);
    instructions_.pushBack(ins);
  }
  size_t numPhis() const { return phis_.length(); }
  LPhi* getPhi(size_t index) { return &phis_[index]; }
  const LPhi* getPhi(size_t index) const { return &phis_[index]; }
  MBasicBlock* mir() const { return block_; }
  LInstructionIterator begin() { return instructions_.begin(); }
  LInstructionIterator begin(LInstruction* at) {
    return instructions_.begin(at);
  }
  LInstructionIterator end() { return instructions_.end(); }
  LInstructionReverseIterator rbegin() { return instructions_.rbegin(); }
  LInstructionReverseIterator rbegin(LInstruction* at) {
    return instructions_.rbegin(at);
  }
  LInstructionReverseIterator rend() { return instructions_.rend(); }
  InlineList<LInstruction>& instructions() { return instructions_; }
  void insertAfter(LInstruction* at, LInstruction* ins) {
    instructions_.insertAfter(at, ins);
  }
  void insertBefore(LInstruction* at, LInstruction* ins) {
    instructions_.insertBefore(at, ins);
  }
  const LNode* firstElementWithId() const {
    return !phis_.empty() ? static_cast<const LNode*>(getPhi(0))
                          : firstInstructionWithId();
  }
  uint32_t firstId() const { return firstElementWithId()->id(); }
  uint32_t lastId() const { return lastInstructionWithId()->id(); }
  const LInstruction* firstInstructionWithId() const;
  const LInstruction* lastInstructionWithId() const {
    const LInstruction* last = *instructions_.rbegin();
    MOZ_ASSERT(last->id());
    // The last instruction is a control flow instruction which does not have
    // any output.
    MOZ_ASSERT(last->numDefs() == 0);
    return last;
  }

  // Return the label to branch to when branching to this block.
  Label* label() {
    MOZ_ASSERT(!isTrivial());
    return &label_;
  }

  LMoveGroup* getEntryMoveGroup(TempAllocator& alloc);
  LMoveGroup* getExitMoveGroup(TempAllocator& alloc);

  // Test whether this basic block is empty except for a simple goto and is
  // not a loop header. No code will be emitted for such blocks.
  bool isTrivial() { return begin()->isGoto() && !mir()->isLoopHeader(); }

#ifdef JS_JITSPEW
  void dump(GenericPrinter& out);
  void dump();
#endif
};
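
// Usage sketch (illustrative; `block` is a hypothetical LBlock*): a pass
// visiting every instruction in a block, in order.
//
//   for (LInstructionIterator it = block->begin(); it != block->end(); it++) {
//     LInstruction* ins = *it;
//     // ...
//   }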

namespace details {
template <size_t Defs, size_t Temps>
class LInstructionFixedDefsTempsHelper : public LInstruction {
  mozilla::Array<LDefinition, Defs + Temps> defsAndTemps_;

 protected:
  LInstructionFixedDefsTempsHelper(Opcode opcode, uint32_t numOperands)
      : LInstruction(opcode, numOperands, Defs, Temps) {}

 public:
  // Override the methods in LInstruction with more optimized versions
  // for when we know the exact instruction type.
  LDefinition* getDef(size_t index) {
    MOZ_ASSERT(index < Defs);
    return &defsAndTemps_[index];
  }
  LDefinition* getTemp(size_t index) {
    MOZ_ASSERT(index < Temps);
    return &defsAndTemps_[Defs + index];
  }
  LInt64Definition getInt64Temp(size_t index) {
    MOZ_ASSERT(index + INT64_PIECES <= Temps);
#if JS_BITS_PER_WORD == 32
    return LInt64Definition(defsAndTemps_[Defs + index + INT64HIGH_INDEX],
                            defsAndTemps_[Defs + index + INT64LOW_INDEX]);
#else
    return LInt64Definition(defsAndTemps_[Defs + index]);
#endif
  }

  void setDef(size_t index, const LDefinition& def) {
    MOZ_ASSERT(index < Defs);
    defsAndTemps_[index] = def;
  }
  void setTemp(size_t index, const LDefinition& a) {
    MOZ_ASSERT(index < Temps);
    defsAndTemps_[Defs + index] = a;
  }
  void setInt64Temp(size_t index, const LInt64Definition& a) {
#if JS_BITS_PER_WORD == 32
    setTemp(index, a.low());
    setTemp(index + 1, a.high());
#else
    setTemp(index, a.value());
#endif
  }

  // Default accessors, assuming a single input and output, respectively.
  const LAllocation* input() {
    MOZ_ASSERT(numOperands() == 1);
    return getOperand(0);
  }
  const LDefinition* output() {
    MOZ_ASSERT(numDefs() == 1);
    return getDef(0);
  }
  static size_t offsetOfDef(size_t index) {
    using T = LInstructionFixedDefsTempsHelper<0, 0>;
    return offsetof(T, defsAndTemps_) + index * sizeof(LDefinition);
  }
  static size_t offsetOfTemp(uint32_t numDefs, uint32_t index) {
    using T = LInstructionFixedDefsTempsHelper<0, 0>;
    return offsetof(T, defsAndTemps_) + (numDefs + index) * sizeof(LDefinition);
  }
};
}  // namespace details

inline LDefinition* LInstruction::getDef(size_t index) {
  MOZ_ASSERT(index < numDefs());
  using T = details::LInstructionFixedDefsTempsHelper<0, 0>;
  uint8_t* p = reinterpret_cast<uint8_t*>(this) + T::offsetOfDef(index);
  return reinterpret_cast<LDefinition*>(p);
}

inline LDefinition* LInstruction::getTemp(size_t index) {
  MOZ_ASSERT(index < numTemps());
  using T = details::LInstructionFixedDefsTempsHelper<0, 0>;
  uint8_t* p =
      reinterpret_cast<uint8_t*>(this) + T::offsetOfTemp(numDefs(), index);
  return reinterpret_cast<LDefinition*>(p);
}
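
// Layout note (illustrative): definitions and temps share a single array,
// defs first, which is why offsetOfTemp() skips numDefs entries. For
// Defs == 1 and Temps == 2:
//
//   defsAndTemps_[0]       // getDef(0)
//   defsAndTemps_[1], [2]  // getTemp(0), getTemp(1)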

template <size_t Defs, size_t Operands, size_t Temps>
class LInstructionHelper
    : public details::LInstructionFixedDefsTempsHelper<Defs, Temps> {
  mozilla::Array<LAllocation, Operands> operands_;

 protected:
  explicit LInstructionHelper(LNode::Opcode opcode)
      : details::LInstructionFixedDefsTempsHelper<Defs, Temps>(opcode,
                                                               Operands) {
    static_assert(
        Operands == 0 || sizeof(operands_) == Operands * sizeof(LAllocation),
        "mozilla::Array should not contain other fields");
    if (Operands > 0) {
      using T = LInstructionHelper<Defs, Operands, Temps>;
      this->initOperandsOffset(offsetof(T, operands_));
    }
  }

 public:
  // Override the methods in LInstruction with more optimized versions
  // for when we know the exact instruction type.
  LAllocation* getOperand(size_t index) { return &operands_[index]; }
  void setOperand(size_t index, const LAllocation& a) { operands_[index] = a; }
  void setBoxOperand(size_t index, const LBoxAllocation& alloc) {
#ifdef JS_NUNBOX32
    operands_[index + TYPE_INDEX] = alloc.type();
    operands_[index + PAYLOAD_INDEX] = alloc.payload();
#else
    operands_[index] = alloc.value();
#endif
  }
  void setInt64Operand(size_t index, const LInt64Allocation& alloc) {
#if JS_BITS_PER_WORD == 32
    operands_[index + INT64LOW_INDEX] = alloc.low();
    operands_[index + INT64HIGH_INDEX] = alloc.high();
#else
    operands_[index] = alloc.value();
#endif
  }
  const LInt64Allocation getInt64Operand(size_t offset) {
#if JS_BITS_PER_WORD == 32
    return LInt64Allocation(operands_[offset + INT64HIGH_INDEX],
                            operands_[offset + INT64LOW_INDEX]);
#else
    return LInt64Allocation(operands_[offset]);
#endif
  }
};
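
// Sketch of a concrete instruction built on this helper (illustrative; the
// opcode name is hypothetical, as real opcodes come from LIR_OPCODE_LIST):
//
//   class LAddI32 : public LInstructionHelper<1, 2, 0> {
//    public:
//     LIR_HEADER(AddI32)
//     LAddI32() : LInstructionHelper(classOpcode) {}
//     const LAllocation* lhs() { return getOperand(0); }
//     const LAllocation* rhs() { return getOperand(1); }
//   };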

template <size_t Defs, size_t Temps>
class LVariadicInstruction
    : public details::LInstructionFixedDefsTempsHelper<Defs, Temps> {
 protected:
  LVariadicInstruction(LNode::Opcode opcode, size_t numOperands)
      : details::LInstructionFixedDefsTempsHelper<Defs, Temps>(opcode,
                                                               numOperands) {}

 public:
  void setBoxOperand(size_t index, const LBoxAllocation& a) {
#ifdef JS_NUNBOX32
    this->setOperand(index + TYPE_INDEX, a.type());
    this->setOperand(index + PAYLOAD_INDEX, a.payload());
#else
    this->setOperand(index, a.value());
#endif
  }
};

template <size_t Defs, size_t Operands, size_t Temps>
class LCallInstructionHelper
    : public LInstructionHelper<Defs, Operands, Temps> {
 protected:
  explicit LCallInstructionHelper(LNode::Opcode opcode)
      : LInstructionHelper<Defs, Operands, Temps>(opcode) {
    this->setIsCall();
  }
};

template <size_t Defs, size_t Temps>
class LBinaryCallInstructionHelper
    : public LCallInstructionHelper<Defs, 2, Temps> {
 protected:
  explicit LBinaryCallInstructionHelper(LNode::Opcode opcode)
      : LCallInstructionHelper<Defs, 2, Temps>(opcode) {}

 public:
  const LAllocation* lhs() { return this->getOperand(0); }
  const LAllocation* rhs() { return this->getOperand(1); }
};

class LRecoverInfo : public TempObject {
 public:
  typedef Vector<MNode*, 2, JitAllocPolicy> Instructions;

 private:
  // List of instructions needed to recover the stack frames.
  // Outer frames are stored before inner frames.
  Instructions instructions_;

  // Cached offset where this resume point is encoded.
  RecoverOffset recoverOffset_;

  explicit LRecoverInfo(TempAllocator& alloc);
  [[nodiscard]] bool init(MResumePoint* mir);

  // Fill the instruction vector such that all instructions needed for the
  // recovery are pushed before the current instruction.
  template <typename Node>
  [[nodiscard]] bool appendOperands(Node* ins);
  [[nodiscard]] bool appendDefinition(MDefinition* def);
  [[nodiscard]] bool appendResumePoint(MResumePoint* rp);

 public:
  static LRecoverInfo* New(MIRGenerator* gen, MResumePoint* mir);

  // Resume point of the innermost function.
  MResumePoint* mir() const { return instructions_.back()->toResumePoint(); }
  RecoverOffset recoverOffset() const { return recoverOffset_; }
  void setRecoverOffset(RecoverOffset offset) {
    MOZ_ASSERT(recoverOffset_ == INVALID_RECOVER_OFFSET);
    recoverOffset_ = offset;
  }

  MNode** begin() { return instructions_.begin(); }
  MNode** end() { return instructions_.end(); }
  size_t numInstructions() const { return instructions_.length(); }

  class OperandIter {
   private:
    MNode** it_;
    MNode** end_;
    size_t op_;
    size_t opEnd_;
    MResumePoint* rp_;
    MNode* node_;

   public:
    explicit OperandIter(LRecoverInfo* recoverInfo)
        : it_(recoverInfo->begin()),
          end_(recoverInfo->end()),
          op_(0),
          opEnd_(0),
          rp_(nullptr),
          node_(nullptr) {
      settle();
    }

    void settle() {
      opEnd_ = (*it_)->numOperands();
      while (opEnd_ == 0) {
        ++it_;
        op_ = 0;
        opEnd_ = (*it_)->numOperands();
      }
      node_ = *it_;
      if (node_->isResumePoint()) {
        rp_ = node_->toResumePoint();
      }
    }

    MDefinition* operator*() {
      if (rp_) {  // de-virtualize MResumePoint::getOperand calls.
        return rp_->getOperand(op_);
      }
      return node_->getOperand(op_);
    }
    MDefinition* operator->() {
      if (rp_) {  // de-virtualize MResumePoint::getOperand calls.
        return rp_->getOperand(op_);
      }
      return node_->getOperand(op_);
    }

    OperandIter& operator++() {
      ++op_;
      if (op_ != opEnd_) {
        return *this;
      }
      op_ = 0;
      ++it_;
      node_ = rp_ = nullptr;
      if (!*this) {
        settle();
      }
      return *this;
    }

    explicit operator bool() const { return it_ == end_; }

#ifdef DEBUG
    bool canOptimizeOutIfUnused();
#endif
  };
};
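
// Usage sketch (illustrative; `recoverInfo` is a hypothetical LRecoverInfo*):
// iterating every operand needed to recover the captured frames, outermost
// first. Note that operator bool() above reports *exhaustion*, so the loop
// condition is negated.
//
//   for (LRecoverInfo::OperandIter it(recoverInfo); !it; ++it) {
//     MDefinition* def = *it;
//     // ...
//   }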

// An LSnapshot is the reflection of an MResumePoint in LIR. Unlike
// MResumePoints, they cannot be shared, as they are filled in by the register
// allocator in order to capture the precise low-level stack state in between
// an instruction's input and output. During code generation, LSnapshots are
// compressed and saved in the compiled script.
class LSnapshot : public TempObject {
 private:
  LAllocation* slots_;
  LRecoverInfo* recoverInfo_;
  SnapshotOffset snapshotOffset_;
  uint32_t numSlots_;
  BailoutId bailoutId_;
  BailoutKind bailoutKind_;

  LSnapshot(LRecoverInfo* recover, BailoutKind kind);
  [[nodiscard]] bool init(MIRGenerator* gen);

 public:
  static LSnapshot* New(MIRGenerator* gen, LRecoverInfo* recover,
                        BailoutKind kind);

  size_t numEntries() const { return numSlots_; }
  size_t numSlots() const { return numSlots_ / BOX_PIECES; }
  LAllocation* payloadOfSlot(size_t i) {
    MOZ_ASSERT(i < numSlots());
    size_t entryIndex = (i * BOX_PIECES) + (BOX_PIECES - 1);
    return getEntry(entryIndex);
  }
#ifdef JS_NUNBOX32
  LAllocation* typeOfSlot(size_t i) {
    MOZ_ASSERT(i < numSlots());
    size_t entryIndex = (i * BOX_PIECES) + (BOX_PIECES - 2);
    return getEntry(entryIndex);
  }
#endif
  LAllocation* getEntry(size_t i) {
    MOZ_ASSERT(i < numSlots_);
    return &slots_[i];
  }
  void setEntry(size_t i, const LAllocation& alloc) {
    MOZ_ASSERT(i < numSlots_);
    slots_[i] = alloc;
  }
  LRecoverInfo* recoverInfo() const { return recoverInfo_; }
  MResumePoint* mir() const { return recoverInfo()->mir(); }
  SnapshotOffset snapshotOffset() const { return snapshotOffset_; }
  BailoutId bailoutId() const { return bailoutId_; }
  void setSnapshotOffset(SnapshotOffset offset) {
    MOZ_ASSERT(snapshotOffset_ == INVALID_SNAPSHOT_OFFSET);
    snapshotOffset_ = offset;
  }
  void setBailoutId(BailoutId id) {
    MOZ_ASSERT(bailoutId_ == INVALID_BAILOUT_ID);
    bailoutId_ = id;
  }
  BailoutKind bailoutKind() const { return bailoutKind_; }
  void rewriteRecoveredInput(LUse input);
};
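
// Worked example (illustrative): on a nunbox (32-bit) build, BOX_PIECES == 2,
// so a snapshot with numSlots() == 3 has numEntries() == 6, and slot i maps to
//
//   typeOfSlot(i)    -> getEntry(2 * i);      // BOX_PIECES - 2 == 0
//   payloadOfSlot(i) -> getEntry(2 * i + 1);  // BOX_PIECES - 1 == 1
//
// On a punbox (64-bit) build, BOX_PIECES == 1 and payloadOfSlot(i) is just
// getEntry(i).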
1369
1370 struct SafepointSlotEntry {
1371 // Flag indicating whether this is a slot in the stack or argument space.
1372 uint32_t stack : 1;
1373
1374 // Byte offset of the slot, as in LStackSlot or LArgument.
1375 uint32_t slot : 31;
1376
SafepointSlotEntrySafepointSlotEntry1377 SafepointSlotEntry() : stack(0), slot(0) {}
SafepointSlotEntrySafepointSlotEntry1378 SafepointSlotEntry(bool stack, uint32_t slot) : stack(stack), slot(slot) {}
SafepointSlotEntrySafepointSlotEntry1379 explicit SafepointSlotEntry(const LAllocation* a)
1380 : stack(a->isStackSlot()), slot(a->memorySlot()) {}
1381 };
1382
1383 struct SafepointNunboxEntry {
1384 uint32_t typeVreg;
1385 LAllocation type;
1386 LAllocation payload;
1387
SafepointNunboxEntrySafepointNunboxEntry1388 SafepointNunboxEntry() : typeVreg(0) {}
SafepointNunboxEntrySafepointNunboxEntry1389 SafepointNunboxEntry(uint32_t typeVreg, LAllocation type, LAllocation payload)
1390 : typeVreg(typeVreg), type(type), payload(payload) {}
1391 };
1392
1393 class LSafepoint : public TempObject {
1394 using SlotEntry = SafepointSlotEntry;
1395 using NunboxEntry = SafepointNunboxEntry;
1396
1397 public:
1398 typedef Vector<SlotEntry, 0, JitAllocPolicy> SlotList;
1399 typedef Vector<NunboxEntry, 0, JitAllocPolicy> NunboxList;
1400
1401 private:
1402 // The information in a safepoint describes the registers and gc related
1403 // values that are live at the start of the associated instruction.
1404
1405 // The set of registers which are live at an OOL call made within the
1406 // instruction. This includes any registers for inputs which are not
1407 // use-at-start, any registers for temps, and any registers live after the
1408 // call except outputs of the instruction.
1409 //
1410 // For call instructions, the live regs are empty. Call instructions may
1411 // have register inputs or temporaries, which will *not* be in the live
1412 // registers: if passed to the call, the values passed will be marked via
1413 // MarkJitExitFrame, and no registers can be live after the instruction
1414 // except its outputs.
1415 LiveRegisterSet liveRegs_;
1416
1417 // The subset of liveRegs which contains gcthing pointers.
1418 LiveGeneralRegisterSet gcRegs_;
1419
1420 #ifdef CHECK_OSIPOINT_REGISTERS
1421 // Clobbered regs of the current instruction. This set is never written to
1422 // the safepoint; it's only used by assertions during compilation.
1423 LiveRegisterSet clobberedRegs_;
1424 #endif
1425
1426 // Offset to a position in the safepoint stream, or
1427 // INVALID_SAFEPOINT_OFFSET.
1428 uint32_t safepointOffset_;
1429
1430 // Assembler buffer displacement to OSI point's call location.
1431 uint32_t osiCallPointOffset_;
1432
1433 // List of slots which have gcthing pointers.
1434 SlotList gcSlots_;
1435
1436 #ifdef JS_NUNBOX32
1437 // List of registers (in liveRegs) and slots which contain pieces of Values.
1438 NunboxList nunboxParts_;
1439 #elif JS_PUNBOX64
1440 // List of slots which have Values.
1441 SlotList valueSlots_;
1442
1443 // The subset of liveRegs which have Values.
1444 LiveGeneralRegisterSet valueRegs_;
1445 #endif
1446
1447 // The subset of liveRegs which contains pointers to slots/elements.
1448 LiveGeneralRegisterSet slotsOrElementsRegs_;
1449
1450 // List of slots which have slots/elements pointers.
1451 SlotList slotsOrElementsSlots_;
1452
1453 // Wasm only: with what kind of instruction is this LSafepoint associated?
1454 // true => wasm trap, false => wasm call.
1455 bool isWasmTrap_;
1456
1457 // Wasm only: what is the value of masm.framePushed() that corresponds to
1458 // the lowest-addressed word covered by the StackMap that we will generate
1459 // from this LSafepoint? This depends on the instruction:
1460 //
1461 // if isWasmTrap_ == true:
1462 // masm.framePushed() unmodified. Note that when constructing the
1463 // StackMap we will add entries below this point to take account of
1464 // registers dumped on the stack as a result of the trap.
1465 //
1466 // if isWasmTrap_ == false:
1467 // masm.framePushed() - StackArgAreaSizeUnaligned(arg types for the call),
1468 // because the map does not include the outgoing args themselves, but
1469 // it does cover any and all alignment space above them.
1470 uint32_t framePushedAtStackMapBase_;
1471
1472 public:
assertInvariants()1473 void assertInvariants() {
1474 // Every register in valueRegs and gcRegs should also be in liveRegs.
1475 #ifndef JS_NUNBOX32
1476 MOZ_ASSERT((valueRegs().bits() & ~liveRegs().gprs().bits()) == 0);
1477 #endif
1478 MOZ_ASSERT((gcRegs().bits() & ~liveRegs().gprs().bits()) == 0);
1479 }
1480
LSafepoint(TempAllocator & alloc)1481 explicit LSafepoint(TempAllocator& alloc)
1482 : safepointOffset_(INVALID_SAFEPOINT_OFFSET),
1483 osiCallPointOffset_(0),
1484 gcSlots_(alloc),
1485 #ifdef JS_NUNBOX32
1486 nunboxParts_(alloc),
1487 #else
1488 valueSlots_(alloc),
1489 #endif
1490 slotsOrElementsSlots_(alloc),
1491 isWasmTrap_(false),
1492 framePushedAtStackMapBase_(0) {
1493 assertInvariants();
1494 }
addLiveRegister(AnyRegister reg)1495 void addLiveRegister(AnyRegister reg) {
1496 liveRegs_.addUnchecked(reg);
1497 assertInvariants();
1498 }
liveRegs()1499 const LiveRegisterSet& liveRegs() const { return liveRegs_; }
1500 #ifdef CHECK_OSIPOINT_REGISTERS
addClobberedRegister(AnyRegister reg)1501 void addClobberedRegister(AnyRegister reg) {
1502 clobberedRegs_.addUnchecked(reg);
1503 assertInvariants();
1504 }
clobberedRegs()1505 const LiveRegisterSet& clobberedRegs() const { return clobberedRegs_; }
1506 #endif
addGcRegister(Register reg)1507 void addGcRegister(Register reg) {
1508 gcRegs_.addUnchecked(reg);
1509 assertInvariants();
1510 }
gcRegs()1511 LiveGeneralRegisterSet gcRegs() const { return gcRegs_; }
addGcSlot(bool stack,uint32_t slot)1512 [[nodiscard]] bool addGcSlot(bool stack, uint32_t slot) {
1513 bool result = gcSlots_.append(SlotEntry(stack, slot));
1514 if (result) {
1515 assertInvariants();
1516 }
1517 return result;
1518 }
gcSlots()1519 SlotList& gcSlots() { return gcSlots_; }
1520
slotsOrElementsSlots()1521 SlotList& slotsOrElementsSlots() { return slotsOrElementsSlots_; }
slotsOrElementsRegs()1522 LiveGeneralRegisterSet slotsOrElementsRegs() const {
1523 return slotsOrElementsRegs_;
1524 }
addSlotsOrElementsRegister(Register reg)1525 void addSlotsOrElementsRegister(Register reg) {
1526 slotsOrElementsRegs_.addUnchecked(reg);
1527 assertInvariants();
1528 }
addSlotsOrElementsSlot(bool stack,uint32_t slot)1529 [[nodiscard]] bool addSlotsOrElementsSlot(bool stack, uint32_t slot) {
1530 bool result = slotsOrElementsSlots_.append(SlotEntry(stack, slot));
1531 if (result) {
1532 assertInvariants();
1533 }
1534 return result;
1535 }
addSlotsOrElementsPointer(LAllocation alloc)1536 [[nodiscard]] bool addSlotsOrElementsPointer(LAllocation alloc) {
1537 if (alloc.isMemory()) {
1538 return addSlotsOrElementsSlot(alloc.isStackSlot(), alloc.memorySlot());
1539 }
1540 MOZ_ASSERT(alloc.isRegister());
1541 addSlotsOrElementsRegister(alloc.toRegister().gpr());
1542 assertInvariants();
1543 return true;
1544 }
  bool hasSlotsOrElementsPointer(LAllocation alloc) const {
    if (alloc.isRegister()) {
      return slotsOrElementsRegs().has(alloc.toRegister().gpr());
    }
    for (size_t i = 0; i < slotsOrElementsSlots_.length(); i++) {
      const SlotEntry& entry = slotsOrElementsSlots_[i];
      if (entry.stack == alloc.isStackSlot() &&
          entry.slot == alloc.memorySlot()) {
        return true;
      }
    }
    return false;
  }

  [[nodiscard]] bool addGcPointer(LAllocation alloc) {
    if (alloc.isMemory()) {
      return addGcSlot(alloc.isStackSlot(), alloc.memorySlot());
    }
    if (alloc.isRegister()) {
      addGcRegister(alloc.toRegister().gpr());
    }
    assertInvariants();
    return true;
  }
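  // addGcPointer() above dispatches on the allocation's kind: memory
  // allocations (stack and argument slots) are recorded as slot entries,
  // while register allocations go into gcRegs_. Illustrative sketch only
  // (`gcAlloc` is a hypothetical allocation holding a GC thing):
  //
  //   if (!safepoint->addGcPointer(gcAlloc)) {
  //     return false;  // appending the slot entry failed (OOM)
  //   }
  //   MOZ_ASSERT(safepoint->hasGcPointer(gcAlloc));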

  bool hasGcPointer(LAllocation alloc) const {
    if (alloc.isRegister()) {
      return gcRegs().has(alloc.toRegister().gpr());
    }
    MOZ_ASSERT(alloc.isMemory());
    for (size_t i = 0; i < gcSlots_.length(); i++) {
      if (gcSlots_[i].stack == alloc.isStackSlot() &&
          gcSlots_[i].slot == alloc.memorySlot()) {
        return true;
      }
    }
    return false;
  }

  // Return true if all GC-managed pointers from `alloc` are recorded in this
  // safepoint.
  bool hasAllGcPointersFromStackArea(LAllocation alloc) const {
    for (LStackArea::ResultIterator iter = alloc.toStackArea()->results(); iter;
         iter.next()) {
      if (iter.isGcPointer() && !hasGcPointer(iter.alloc())) {
        return false;
      }
    }
    return true;
  }

#ifdef JS_NUNBOX32
  [[nodiscard]] bool addNunboxParts(uint32_t typeVreg, LAllocation type,
                                    LAllocation payload) {
    bool result = nunboxParts_.append(NunboxEntry(typeVreg, type, payload));
    if (result) {
      assertInvariants();
    }
    return result;
  }

  [[nodiscard]] bool addNunboxType(uint32_t typeVreg, LAllocation type) {
    for (size_t i = 0; i < nunboxParts_.length(); i++) {
      if (nunboxParts_[i].type == type) {
        return true;
      }
      if (nunboxParts_[i].type == LUse(typeVreg, LUse::ANY)) {
        nunboxParts_[i].type = type;
        return true;
      }
    }

    // vregs for nunbox pairs are adjacent, with the type coming first.
    uint32_t payloadVreg = typeVreg + 1;
    bool result = nunboxParts_.append(
        NunboxEntry(typeVreg, type, LUse(payloadVreg, LUse::ANY)));
    if (result) {
      assertInvariants();
    }
    return result;
  }

  [[nodiscard]] bool addNunboxPayload(uint32_t payloadVreg,
                                      LAllocation payload) {
    for (size_t i = 0; i < nunboxParts_.length(); i++) {
      if (nunboxParts_[i].payload == payload) {
        return true;
      }
      if (nunboxParts_[i].payload == LUse(payloadVreg, LUse::ANY)) {
        nunboxParts_[i].payload = payload;
        return true;
      }
    }

    // vregs for nunbox pairs are adjacent, with the type coming first.
    uint32_t typeVreg = payloadVreg - 1;
    bool result = nunboxParts_.append(
        NunboxEntry(typeVreg, LUse(typeVreg, LUse::ANY), payload));
    if (result) {
      assertInvariants();
    }
    return result;
  }
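  // Example of how the partial-entry scheme above plays out: suppose the type
  // of a Value lives in vreg 5 and its payload in vreg 6 (adjacent, type
  // first). If addNunboxType(5, type) runs before the payload is allocated,
  // it appends an entry whose payload is the placeholder LUse(6, LUse::ANY);
  // a later addNunboxPayload(6, payload) finds that placeholder and fills in
  // the real allocation. (Vreg numbers here are illustrative.)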

  LAllocation findTypeAllocation(uint32_t typeVreg) {
    // Look for some allocation for the specified type vreg, to go with a
    // partial nunbox entry for the payload. Note that we don't need to
    // look at the value slots in the safepoint, as these aren't used by
    // register allocators which add partial nunbox entries.
    for (size_t i = 0; i < nunboxParts_.length(); i++) {
      if (nunboxParts_[i].typeVreg == typeVreg &&
          !nunboxParts_[i].type.isUse()) {
        return nunboxParts_[i].type;
      }
    }
    return LUse(typeVreg, LUse::ANY);
  }

#  ifdef DEBUG
  bool hasNunboxPayload(LAllocation payload) const {
    for (size_t i = 0; i < nunboxParts_.length(); i++) {
      if (nunboxParts_[i].payload == payload) {
        return true;
      }
    }
    return false;
  }
#  endif

  NunboxList& nunboxParts() { return nunboxParts_; }

#elif defined(JS_PUNBOX64)
  [[nodiscard]] bool addValueSlot(bool stack, uint32_t slot) {
    bool result = valueSlots_.append(SlotEntry(stack, slot));
    if (result) {
      assertInvariants();
    }
    return result;
  }
  SlotList& valueSlots() { return valueSlots_; }

  bool hasValueSlot(bool stack, uint32_t slot) const {
    for (size_t i = 0; i < valueSlots_.length(); i++) {
      if (valueSlots_[i].stack == stack && valueSlots_[i].slot == slot) {
        return true;
      }
    }
    return false;
  }

  void addValueRegister(Register reg) {
    valueRegs_.add(reg);
    assertInvariants();
  }
  LiveGeneralRegisterSet valueRegs() const { return valueRegs_; }

  [[nodiscard]] bool addBoxedValue(LAllocation alloc) {
    if (alloc.isRegister()) {
      Register reg = alloc.toRegister().gpr();
      if (!valueRegs().has(reg)) {
        addValueRegister(reg);
      }
      return true;
    }
    if (hasValueSlot(alloc.isStackSlot(), alloc.memorySlot())) {
      return true;
    }
    return addValueSlot(alloc.isStackSlot(), alloc.memorySlot());
  }
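  // On 64-bit (punbox) targets a boxed Value occupies a single allocation, so
  // addBoxedValue() above needs only one entry per Value. Note the
  // deduplication: registers are checked against valueRegs() and slots
  // against hasValueSlot() before anything is appended, so recording the same
  // boxed Value twice is safe and cheap. Illustrative sketch (`valueAlloc` is
  // a hypothetical allocation):
  //
  //   if (!safepoint->addBoxedValue(valueAlloc)) {
  //     return false;  // OOM appending the value slot
  //   }
  //   MOZ_ASSERT(safepoint->hasBoxedValue(valueAlloc));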

  bool hasBoxedValue(LAllocation alloc) const {
    if (alloc.isRegister()) {
      return valueRegs().has(alloc.toRegister().gpr());
    }
    return hasValueSlot(alloc.isStackSlot(), alloc.memorySlot());
  }

#endif  // JS_PUNBOX64

  bool encoded() const { return safepointOffset_ != INVALID_SAFEPOINT_OFFSET; }
  uint32_t offset() const {
    MOZ_ASSERT(encoded());
    return safepointOffset_;
  }
  void setOffset(uint32_t offset) { safepointOffset_ = offset; }
  uint32_t osiReturnPointOffset() const {
    // In general, pointer arithmetic on code is bad, but in this case it is
    // correct by construction: the return address is the address immediately
    // following the patchable near call, so deriving it by walking the code
    // and stepping over constant pools would be wrong.
    return osiCallPointOffset_ + Assembler::PatchWrite_NearCallSize();
  }
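  // Concretely, the OSI return point above is the call point plus the fixed,
  // platform-dependent size of a patchable near call. For instance, if
  // PatchWrite_NearCallSize() were 5 bytes (the size of a rel32 call on x86;
  // illustrative only) and the call were recorded at offset 0x40, the return
  // point would be 0x45.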
  uint32_t osiCallPointOffset() const { return osiCallPointOffset_; }
  void setOsiCallPointOffset(uint32_t osiCallPointOffset) {
    MOZ_ASSERT(!osiCallPointOffset_);
    osiCallPointOffset_ = osiCallPointOffset;
  }

  bool isWasmTrap() const { return isWasmTrap_; }
  void setIsWasmTrap() { isWasmTrap_ = true; }

  uint32_t framePushedAtStackMapBase() const {
    return framePushedAtStackMapBase_;
  }
  void setFramePushedAtStackMapBase(uint32_t n) {
    MOZ_ASSERT(framePushedAtStackMapBase_ == 0);
    framePushedAtStackMapBase_ = n;
  }
};
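
// Taken together, a safepoint is typically populated by a register allocator
// walking the allocations that are live across a call. A hedged sketch, not
// an actual allocator (the loop and names are hypothetical):
//
//   LSafepoint* sp = ins->safepoint();
//   for (/* each live GC-typed allocation `a` at this call */) {
//     if (!sp->addGcPointer(a)) {
//       return false;
//     }
//   }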

class LInstruction::InputIterator {
 private:
  LInstruction& ins_;
  size_t idx_;
  bool snapshot_;

  void handleOperandsEnd() {
    // Once all operands have been visited, switch to iterating over the
    // snapshot's entries.
    if (!snapshot_ && idx_ == ins_.numOperands() && ins_.snapshot()) {
      idx_ = 0;
      snapshot_ = true;
    }
  }

 public:
  explicit InputIterator(LInstruction& ins)
      : ins_(ins), idx_(0), snapshot_(false) {
    handleOperandsEnd();
  }

  bool more() const {
    if (snapshot_) {
      return idx_ < ins_.snapshot()->numEntries();
    }
    if (idx_ < ins_.numOperands()) {
      return true;
    }
    if (ins_.snapshot() && ins_.snapshot()->numEntries()) {
      return true;
    }
    return false;
  }

  bool isSnapshotInput() const { return snapshot_; }

  void next() {
    MOZ_ASSERT(more());
    idx_++;
    handleOperandsEnd();
  }

  void replace(const LAllocation& alloc) {
    if (snapshot_) {
      ins_.snapshot()->setEntry(idx_, alloc);
    } else {
      ins_.setOperand(idx_, alloc);
    }
  }

  LAllocation* operator*() const {
    if (snapshot_) {
      return ins_.snapshot()->getEntry(idx_);
    }
    return ins_.getOperand(idx_);
  }

  LAllocation* operator->() const { return **this; }
};
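
// The iterator visits an instruction's operands first and then, transparently,
// the entries of its snapshot (if any), so callers see every input through a
// single loop. Typical usage (sketch):
//
//   for (LInstruction::InputIterator inputs(*ins); inputs.more();
//        inputs.next()) {
//     LAllocation* input = *inputs;
//     // ... inspect *input, or rewrite it via inputs.replace(newAlloc) ...
//   }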

class LIRGraph {
  struct ValueHasher {
    using Lookup = Value;
    static HashNumber hash(const Value& v) { return HashNumber(v.asRawBits()); }
    static bool match(const Value& lhs, const Value& rhs) { return lhs == rhs; }
  };

  FixedList<LBlock> blocks_;

  // constantPool_ is a mozilla::Vector, not a js::Vector, because
  // js::Vector<Value> is prohibited as unsafe. This particular Vector of
  // Values is safe because it is only used within the scope of an
  // AutoSuppressGC (in IonCompile), which inhibits GC.
  mozilla::Vector<Value, 0, JitAllocPolicy> constantPool_;
  typedef HashMap<Value, uint32_t, ValueHasher, JitAllocPolicy> ConstantPoolMap;
  ConstantPoolMap constantPoolMap_;
  Vector<LInstruction*, 0, JitAllocPolicy> safepoints_;
  Vector<LInstruction*, 0, JitAllocPolicy> nonCallSafepoints_;
  uint32_t numVirtualRegisters_;
  uint32_t numInstructions_;

  // Number of stack slots needed for local spills.
  uint32_t localSlotCount_;
  // Number of stack slots needed for argument construction for calls.
  uint32_t argumentSlotCount_;

  MIRGraph& mir_;

 public:
  explicit LIRGraph(MIRGraph* mir);

  [[nodiscard]] bool init() {
    return blocks_.init(mir_.alloc(), mir_.numBlocks());
  }
  MIRGraph& mir() const { return mir_; }
  size_t numBlocks() const { return blocks_.length(); }
  LBlock* getBlock(size_t i) { return &blocks_[i]; }
  uint32_t numBlockIds() const { return mir_.numBlockIds(); }
  [[nodiscard]] bool initBlock(MBasicBlock* mir) {
    auto* block = &blocks_[mir->id()];
    auto* lir = new (block) LBlock(mir);
    return lir->init(mir_.alloc());
  }
  uint32_t getVirtualRegister() {
    numVirtualRegisters_ += VREG_INCREMENT;
    return numVirtualRegisters_;
  }
  uint32_t numVirtualRegisters() const {
    // Virtual registers are 1-based, not 0-based, so add one as a
    // convenience for 0-based arrays.
    return numVirtualRegisters_ + 1;
  }
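  // Because vreg 0 is never handed out (the first getVirtualRegister() call
  // returns 1), a side table indexed by vreg can simply be sized with
  // numVirtualRegisters() and indexed directly. Sketch (SomeInfo is a
  // hypothetical per-vreg entry type):
  //
  //   FixedList<SomeInfo> table;
  //   if (!table.init(alloc, graph.numVirtualRegisters())) {
  //     return false;
  //   }
  //   table[vreg] = ...;  // valid for every allocated vreg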
  uint32_t getInstructionId() { return numInstructions_++; }
  uint32_t numInstructions() const { return numInstructions_; }
  void setLocalSlotCount(uint32_t localSlotCount) {
    localSlotCount_ = localSlotCount;
  }
  uint32_t localSlotCount() const { return localSlotCount_; }
  // Return the localSlotCount() value rounded up so that it satisfies the
  // platform stack alignment requirement, and so that it's a multiple of
  // the number of slots per Value.
  uint32_t paddedLocalSlotCount() const {
    // Round to JitStackAlignment, and implicitly to sizeof(Value) as
    // JitStackAlignment is a multiple of sizeof(Value). These alignments
    // are needed for spilling SIMD registers properly, and for
    // StackOffsetOfPassedArg which rounds argument slots to 8-byte
    // boundaries.
    return AlignBytes(localSlotCount(), JitStackAlignment);
  }
  size_t paddedLocalSlotsSize() const { return paddedLocalSlotCount(); }
  void setArgumentSlotCount(uint32_t argumentSlotCount) {
    argumentSlotCount_ = argumentSlotCount;
  }
  uint32_t argumentSlotCount() const { return argumentSlotCount_; }
  size_t argumentsSize() const { return argumentSlotCount() * sizeof(Value); }
  uint32_t totalSlotCount() const {
    return paddedLocalSlotCount() + argumentsSize();
  }
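  // Worked example for the accessors above (illustrative numbers): with
  // localSlotCount() == 20 and JitStackAlignment == 16,
  // paddedLocalSlotCount() is AlignBytes(20, 16) == 32. If three argument
  // slots are needed and sizeof(Value) == 8, then argumentsSize() == 24 and
  // totalSlotCount() == 32 + 24 == 56 bytes of frame space.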
  [[nodiscard]] bool addConstantToPool(const Value& v, uint32_t* index);
  size_t numConstants() const { return constantPool_.length(); }
  Value* constantPool() { return &constantPool_[0]; }

  bool noteNeedsSafepoint(LInstruction* ins);
  size_t numNonCallSafepoints() const { return nonCallSafepoints_.length(); }
  LInstruction* getNonCallSafepoint(size_t i) const {
    return nonCallSafepoints_[i];
  }
  size_t numSafepoints() const { return safepoints_.length(); }
  LInstruction* getSafepoint(size_t i) const { return safepoints_[i]; }

#ifdef JS_JITSPEW
  void dump(GenericPrinter& out);
  void dump();
#endif
};

LAllocation::LAllocation(AnyRegister reg) {
  if (reg.isFloat()) {
    *this = LFloatReg(reg.fpu());
  } else {
    *this = LGeneralReg(reg.gpr());
  }
}

AnyRegister LAllocation::toRegister() const {
  MOZ_ASSERT(isRegister());
  if (isFloatReg()) {
    return AnyRegister(toFloatReg()->reg());
  }
  return AnyRegister(toGeneralReg()->reg());
}

}  // namespace jit
}  // namespace js

#include "jit/shared/LIR-shared.h"
#if defined(JS_CODEGEN_X86) || defined(JS_CODEGEN_X64)
#  if defined(JS_CODEGEN_X86)
#    include "jit/x86/LIR-x86.h"
#  elif defined(JS_CODEGEN_X64)
#    include "jit/x64/LIR-x64.h"
#  endif
#  include "jit/x86-shared/LIR-x86-shared.h"
#elif defined(JS_CODEGEN_ARM)
#  include "jit/arm/LIR-arm.h"
#elif defined(JS_CODEGEN_ARM64)
#  include "jit/arm64/LIR-arm64.h"
#elif defined(JS_CODEGEN_MIPS32) || defined(JS_CODEGEN_MIPS64)
#  if defined(JS_CODEGEN_MIPS32)
#    include "jit/mips32/LIR-mips32.h"
#  elif defined(JS_CODEGEN_MIPS64)
#    include "jit/mips64/LIR-mips64.h"
#  endif
#  include "jit/mips-shared/LIR-mips-shared.h"
#elif defined(JS_CODEGEN_NONE)
#  include "jit/none/LIR-none.h"
#else
#  error "Unknown architecture!"
#endif

#undef LIR_HEADER

namespace js {
namespace jit {

#define LIROP(name)                            \
  L##name* LNode::to##name() {                 \
    MOZ_ASSERT(is##name());                    \
    return static_cast<L##name*>(this);        \
  }                                            \
  const L##name* LNode::to##name() const {     \
    MOZ_ASSERT(is##name());                    \
    return static_cast<const L##name*>(this);  \
  }
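// For a concrete opcode such as Phi, LIROP(Phi) expands to the checked
// downcasts LPhi* LNode::toPhi() and const LPhi* LNode::toPhi() const, each
// asserting isPhi() before the static_cast. The invocation below stamps these
// out for every opcode in LIR_OPCODE_LIST.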
LIR_OPCODE_LIST(LIROP)
#undef LIROP

#define LALLOC_CAST(type)               \
  L##type* LAllocation::to##type() {    \
    MOZ_ASSERT(is##type());             \
    return static_cast<L##type*>(this); \
  }
#define LALLOC_CONST_CAST(type)                  \
  const L##type* LAllocation::to##type() const { \
    MOZ_ASSERT(is##type());                      \
    return static_cast<const L##type*>(this);    \
  }

LALLOC_CAST(Use)
LALLOC_CONST_CAST(Use)
LALLOC_CONST_CAST(GeneralReg)
LALLOC_CONST_CAST(FloatReg)
LALLOC_CONST_CAST(StackSlot)
LALLOC_CAST(StackArea)
LALLOC_CONST_CAST(StackArea)
LALLOC_CONST_CAST(Argument)
LALLOC_CONST_CAST(ConstantIndex)
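
// As above, each invocation expands to a checked downcast; for example,
// LALLOC_CAST(Use) defines LUse* LAllocation::toUse(), which asserts isUse()
// before casting. Only Use and StackArea get mutable casts; the remaining
// kinds are only ever accessed as const.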

#undef LALLOC_CAST
#undef LALLOC_CONST_CAST

}  // namespace jit
}  // namespace js

#endif /* jit_LIR_h */